diff --git a/.arclint b/.arclint index ffddd414f..86853f87c 100644 --- a/.arclint +++ b/.arclint @@ -1,320 +1,321 @@ { "linters": { "generated": { "type": "generated" }, "clang-format": { "type": "clang-format", "version": ">=12.0", "bin": ["clang-format-12", "clang-format"], "include": "(^(src|chronik)/.*\\.(h|c|cpp|mm)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "autopep8": { "type": "autopep8", "version": ">=1.3.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ], "flags": [ "--aggressive", "--ignore=W503,W504", "--max-line-length=88" ] }, "flake8": { "type": "flake8", "version": ">=5.0", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ], "flags": [ - "--ignore=E303,E305,E501,E704,W503,W504" + "--ignore=E303,E305,E501,E704,W503,W504", + "--require-plugins=flake8-comprehensions" ] }, "lint-format-strings": { "type": "lint-format-strings", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/test/fuzz/strprintf.cpp$)" ] }, "check-doc": { "type": "check-doc", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)" }, "lint-tests": { "type": "lint-tests", "include": "(^src/(seeder/|rpc/|wallet/)?test/.*\\.(cpp)$)" }, "phpcs": { "type": "phpcs", "include": "(\\.php$)", "exclude": [ "(^arcanist/__phutil_library_.+\\.php$)" ], "phpcs.standard": "arcanist/phpcs.xml" }, "lint-locale-dependence": { "type": "lint-locale-dependence", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/))", "(^src/bench/nanobench.h$)" ] }, "lint-cheader": { "type": "lint-cheader", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "spelling": { "type": "spelling", "exclude": [ "(^build-aux/m4/)", "(^depends/)", "(^doc/release-notes/)", "(^contrib/gitian-builder/)", "(^src/(qt/locale|secp256k1|univalue|leveldb)/)", "(^test/lint/dictionary/)", "(package-lock.json)" ], "spelling.dictionaries": [ "test/lint/dictionary/english.json" ] }, "lint-assert-with-side-effects": { "type": "lint-assert-with-side-effects", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-quotes": { "type": "lint-include-quotes", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-guard": { "type": "lint-include-guard", "include": "(^(src|chronik)/.*\\.h$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/tinyformat.h$)" ] }, "lint-include-source": { "type": "lint-include-source", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-std-chrono": { "type": "lint-std-chrono", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-stdint": { "type": "lint-stdint", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/assumptions.h$)" ] }, "lint-source-filename": { "type": "lint-source-filename", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-boost-dependencies": { "type": 
"lint-boost-dependencies", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-python-encoding": { "type": "lint-python-encoding", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-python-shebang": { "type": "lint-python-shebang", "include": "(\\.py$)", "exclude": [ "(__init__\\.py$)", "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-bash-shebang": { "type": "lint-bash-shebang", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)" ] }, "shellcheck": { "type": "shellcheck", "version": ">=0.7.0", "flags": [ "--external-sources", "--source-path=SCRIPTDIR" ], "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)" ] }, "lint-shell-locale": { "type": "lint-shell-locale", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)", "(^cmake/utils/log-and-print-on-failure.sh)" ] }, "lint-cpp-void-parameters": { "type": "lint-cpp-void-parameters", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/glibc_compat.cpp$)" ] }, "lint-logs": { "type": "lint-logs", "include": "(^(src|chronik)/.*\\.(h|cpp|rs)$)" }, "lint-qt": { "type": "lint-qt", "include": "(^src/qt/.*\\.(h|cpp)$)", "exclude": [ "(^src/qt/(locale|forms|res)/)" ] }, "lint-doxygen": { "type": "lint-doxygen", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-whitespace": { "type": "lint-whitespace", "include": "(\\.(ac|am|cmake|conf|in|include|json|m4|md|openrc|php|pl|rs|sh|txt|yml)$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "yamllint": { "type": "yamllint", "include": "(\\.(yml|yaml)$)", "exclude": "(^src/(secp256k1|univalue|leveldb)/)" }, "lint-check-nonfatal": { "type": "lint-check-nonfatal", "include": [ "(^src/rpc/.*\\.(h|c|cpp)$)", "(^src/wallet/rpc*.*\\.(h|c|cpp)$)" ], "exclude": "(^src/rpc/server.cpp)" }, "lint-markdown": { "type": "lint-markdown", "include": [ "(\\.md$)" ], "exclude": "(^contrib/gitian-builder/)" }, "lint-python-mypy": { "type": "lint-python-mypy", "version": ">=0.780", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", "(^contrib/macdeploy/)" ], "flags": [ "--ignore-missing-imports" ] }, "lint-python-mutable-default": { "type": "lint-python-mutable-default", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "prettier": { "type": "prettier", "version":">=2.4.1", "include": "(^web/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)", "exclude": "(^web/.*/translations/.*\\.json$)" }, "lint-python-isort": { "type": "lint-python-isort", "version": ">=5.6.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "rustfmt": { "type": "rustfmt", "version": ">=1.5.1", "include": "(\\.rs$)" }, "eslint": { "type": "eslint", "version": ">=8.0.0", "include": [ "(cashtab/.*\\.js$)", "(apps/alias-server/.*\\.js$)", "(modules/ecashaddrjs/.*\\.js$)", "(apps/ecash-herald/.*\\.js$)" ] }, "lint-python-flynt": { "type": "lint-python-flynt", "version": ">=0.78", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] } } } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e9375d07a..f1d141830 100644 --- 
a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,275 +1,275 @@ Contributing to Bitcoin ABC =========================== The Bitcoin ABC project welcomes contributors! This guide is intended to help developers contribute effectively to Bitcoin ABC. Communicating with Developers ----------------------------- To get in contact with Bitcoin ABC developers, you can join the [eCash Development Telegram group](https://t.me/eCashDevelopment). The intent of this group is to facilitate development of Bitcoin ABC and other eCash node implementations. We welcome people who wish to participate. Acceptable use of this group includes the following: * Introducing yourself to other eCash developers. * Getting help with your development environment. * Discussing how to complete a patch. It is not for: * Market discussion * Non-constructive criticism Bitcoin ABC Development Philosophy ---------------------------------- Bitcoin ABC aims for fast iteration and continuous integration. This means that there should be quick turnaround for patches to be proposed, reviewed, and committed. Changes should not sit in a queue for long. Here are some tips to help keep the development working as intended. These are guidelines for the normal and expected development process. Developers can use their judgement to deviate from these guidelines when they have a good reason to do so. - Keep each change small and self-contained. - Reach out for a 1-on-1 review so things move quickly. - Land the Diff quickly after it is accepted. - Don't amend changes after the Diff is accepted; create a new Diff for another fix. - Review Diffs from other developers as quickly as possible. - Large changes should be broken into logical chunks that are easy to review, and keep the code in a functional state. - Do not mix moving stuff around with changing stuff. Do changes with renames on their own. - Sometimes you want to replace one subsystem with another implementation, in which case it is not possible to do things incrementally. In such cases, you keep both implementations in the codebase for a while, as described [here](http://sevangelatos.com/john-carmack-on-parallel-implementations/) - There are no "development" branches, all Diffs apply to the master branch, and should always improve it (no regressions). - Don't break the build; it is important to keep master green as much as possible. If a Diff is landed and breaks the build, fix it quickly. If it cannot be fixed quickly, it should be reverted, and re-applied later when it no longer breaks the build. - As soon as you see a bug, you fix it. Do not continue on. Fixing the bug becomes the top priority, more important than completing other tasks. - Automate as much as possible, and spend time on things only humans can do.
Here are some handy links for development practices aligned with Bitcoin ABC: - [Developer Notes](doc/developer-notes.md) - [Statement of Bitcoin ABC Values and Visions](https://archive.md/ulgFI) - [How to Make Your Code Reviewer Fall in Love with You](https://mtlynch.io/code-review-love/) - [Large Diffs Are Hurting Your Ability To Ship](https://medium.com/@kurtisnusbaum/large-diffs-are-hurting-your-ability-to-ship-e0b2b41e8acf) - [Stacked Diffs: Keeping Phabricator Diffs Small](https://medium.com/@kurtisnusbaum/stacked-diffs-keeping-phabricator-diffs-small-d9964f4dcfa6) - [Parallel Implementations](http://sevangelatos.com/john-carmack-on-parallel-implementations/) - [The Pragmatic Programmer: From Journeyman to Master](https://www.amazon.com/Pragmatic-Programmer-Journeyman-Master/dp/020161622X) - [Monorepo: Advantages of monolithic version control](https://danluu.com/monorepo/) - [Monorepo: Why Google Stores Billions of Lines of Code in a Single Repository](https://www.youtube.com/watch?v=W71BTkUbdqE) - [The importance of fixing bugs immediately](https://youtu.be/E2MIpi8pIvY?t=16m0s) - [Slow Deployment Causes Meetings](https://www.facebook.com/notes/kent-beck/slow-deployment-causes-meetings/1055427371156793/) - [Good Work, Great Work, and Right Work](https://forum.dlang.org/post/q7u6g1$94p$1@digitalmars.com) - [Accelerate: The Science of Lean Software and DevOps](https://www.amazon.com/Accelerate-Software-Performing-Technology-Organizations/dp/1942788339) - [Facebook Engineering Process with Kent Beck](https://softwareengineeringdaily.com/2019/08/28/facebook-engineering-process-with-kent-beck/) - [Trunk Based Development](https://trunkbaseddevelopment.com/) - [Step-by-step: Programming incrementally](https://ourmachinery.com/post/step-by-step-programming-incrementally/) - [Semantic Compression](https://caseymuratori.com/blog_0015) Getting set up with the Bitcoin ABC Repository ---------------------------------------------- 1. Create an account at [reviews.bitcoinabc.org](https://reviews.bitcoinabc.org/) 2. Install Git and Arcanist on your machine Git documentation can be found at [git-scm.com](https://git-scm.com/). For Arcanist documentation, you can read [Arcanist Quick Start](https://secure.phabricator.com/book/phabricator/article/arcanist_quick_start/) and the [Arcanist User Guide](https://secure.phabricator.com/book/phabricator/article/arcanist/). To install these packages on Debian or Ubuntu, type: `sudo apt-get install git arcanist` 3. If you do not already have an SSH key set up, follow these steps: Type: `ssh-keygen -t ed25519 -C "your_email@example.com"` Enter a file in which to save the key (/home/*username*/.ssh/id_ed25519): [Press enter] 4. Upload your SSH public key to - Go to: `https://reviews.bitcoinabc.org/settings/user/*username*/page/ssh/` - Under "SSH Key Actions", Select "Upload Public Key" Paste contents from: `/home/*username*/.ssh/id_ed25519.pub` 5. Clone the repository and install Arcanist certificate: ``` git clone ssh://vcs@reviews.bitcoinabc.org:2221/source/bitcoin-abc.git cd bitcoin-abc arc install-certificate ``` Note: Arcanist tooling will tend to fail if your remote origin is set to something other than the above. A common mistake is to clone from Github and then forget to update your remotes. Follow instructions provided by `arc install-certificate` to provide your API token. 
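If you did clone from GitHub first, re-pointing the `origin` remote at the Phabricator host above is usually enough for the Arcanist tooling to work again. A minimal sketch (the URL is the same one used in the clone command above; adjust if your setup differs): ``` git remote -v   # check where origin currently points git remote set-url origin ssh://vcs@reviews.bitcoinabc.org:2221/source/bitcoin-abc.git git remote -v   # confirm the change ```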
Contributing to the node software --------------------------------- During submission of patches, arcanist will automatically run `arc lint` to enforce Bitcoin ABC code formatting standards, and often suggests changes. If code formatting tools do not install automatically on your system, you will have to install the following: Install all the code formatting tools on Debian Bullseye (11) or Ubuntu 20.04: ``` sudo apt-get install python3-autopep8 python3-pip php-codesniffer shellcheck yamllint -pip3 install "isort>=5.6.4" "mypy>=0.780" "flynt>=0.78" "flake8>=5" +pip3 install "isort>=5.6.4" "mypy>=0.780" "flynt>=0.78" "flake8>=5" flake8-comprehensions echo "export PATH=\"`python3 -m site --user-base`/bin:\$PATH\"" >> ~/.bashrc source ~/.bashrc ``` If not available in the distribution, `clang-format-12` and `clang-tidy-12` can be installed from or . For example, for macOS: ``` curl -L https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.0/clang+llvm-12.0.0-x86_64-apple-darwin.tar.xz | tar -xJv ln -s $PWD/clang+llvm-12.0.0-x86_64-apple-darwin/bin/clang-format /usr/local/bin/clang-format ln -s $PWD/clang+llvm-12.0.0-x86_64-apple-darwin/bin/clang-tidy /usr/local/bin/clang-tidy ``` If you are modifying a shell script, you will need to install the `shellcheck` linter. A recent version is required and may not be packaged for your distribution. Standalone binaries are available for download on [the project's github release page](https://github.com/koalaman/shellcheck/releases). **Note**: In order for arcanist to detect the `shellcheck` executable, you need to make it available in your `PATH`; if another version is already installed, make sure the recent one is found first. Arcanist will tell you what version is expected and what is found when running `arc lint` against a shell script. If you are running Debian 10, it is also available in the backports repository: ``` sudo apt-get -t buster-backports install shellcheck ``` If you are modifying Rust files, you will need to install a stable Rust version, plus a nightly toolchain called "abc-nightly" for formatting: ```bash # Install latest stable Rust version curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s source ~/.cargo/env rustup install nightly-2023-02-17 rustup component add rustfmt --toolchain nightly-2023-02-17 # Name the nightly toolchain "abc-nightly" rustup toolchain link abc-nightly "$(rustc +nightly-2023-02-17 --print sysroot)" ``` Contributing to the web projects -------------------------------- To contribute to web projects, you will need `nodejs` > 16 and `npm` > 8.5.0. Follow these [installation instructions](https://github.com/nvm-sh/nvm#installing-and-updating) to install `nodejs` with node version manager. Then: ``` cd bitcoin-abc [sudo] nvm install 16 [sudo] npm install -g npm@latest [sudo] npm install -g prettier [sudo] npm install -g eslint ``` To work on the extension, you will need `browserify` ``` [sudo] npm install -g browserify ``` Working with The Bitcoin ABC Repository --------------------------------------- A typical workflow would be: - Create a topic branch in Git for your changes git checkout -b 'my-topic-branch' - Make your changes, and commit them git commit -a -m 'my-commit' - Create a differential with Arcanist arc diff You should add suggested reviewers and a test plan to the commit message. Note that Arcanist is set up to look only at the most-recent commit message, so all your changes for this Diff should be in one Git commit.
- For large changes, break them into several Diffs, as described in this [guide](https://medium.com/@kurtisnusbaum/stacked-diffs-keeping-phabricator-diffs-small-d9964f4dcfa6). You must also include "Depends on Dxxx" in the Arcanist summary to indicate dependence on other Diffs. Note: the `arc land` procedure described in the guide above is obsolete. With the most recent version of arcanist, you may `arc land` the latest commit of your stacked diff after all parts are approved. - Log into Phabricator to see review and feedback. - Make changes as suggested by the reviewers. You can simply edit the files with my-topic-branch checked out, and then type `arc diff`. Arcanist will give you the option to add uncommitted changes. Or, alternatively, you can commit the changes using `git commit -a --amend` to add them to the last commit, or squash multiple commits by typing `git rebase -i master`. If you squash, make sure the commit message has the information needed for arcanist (such as the Diff number, reviewers, etc.). - Update your Diff by typing `arc diff` again. - When reviewers approve your Diff, it should be listed as "ready to Land" in Phabricator. When you want to commit your diff to the repository, check out my-topic-branch in git, then type `arc land`. You have now successfully committed a change to the Bitcoin ABC repository. - When reviewing a Diff, apply the changeset locally by using `arc patch D{NNNN}`. - You will likely be re-writing git histories multiple times, which causes timestamp changes that require re-building a significant number of files. It's highly recommended to install `ccache` (re-run cmake if you install it later), as this will help cut your re-build times from several minutes to under a minute, in many cases. What to work on --------------- If you are looking for a useful task to contribute to the project, a good place to start is the list of tasks at . You could also try [backporting](doc/backporting.md) some code from Bitcoin Core. Copyright --------- By contributing to this repository, you agree to license your work under the MIT license unless specified otherwise in `contrib/debian/copyright` or at the top of the file itself. Any work contributed where you are not the original author must contain its license header with the original author(s) and source. Disclosure Policy ----------------- See [DISCLOSURE_POLICY](DISCLOSURE_POLICY.md). diff --git a/cmake/utils/gen-ninja-deps.py b/cmake/utils/gen-ninja-deps.py index f64544222..6a9beaf7a 100755 --- a/cmake/utils/gen-ninja-deps.py +++ b/cmake/utils/gen-ninja-deps.py @@ -1,181 +1,181 @@ #!/usr/bin/env python3 import argparse import os import subprocess parser = argparse.ArgumentParser(description='Produce a dep file from ninja.') parser.add_argument( '--build-dir', help='The build directory.', required=True) parser.add_argument( '--base-dir', help='The directory for which dependencies are rewritten.', required=True) parser.add_argument('--ninja', help='The ninja executable to use.') parser.add_argument( 'base_target', help="The target from the base's perspective.") parser.add_argument( 'targets', nargs='+', help='The target for which dependencies are extracted.') parser.add_argument( '--extra-deps', nargs='+', help='Extra dependencies.') args = parser.parse_args() build_dir = os.path.abspath(args.build_dir) base_dir = os.path.abspath(args.base_dir) ninja = args.ninja base_target = args.base_target targets = args.targets extra_deps = args.extra_deps # Make sure we operate in the right folder.
os.chdir(build_dir) if ninja is None: ninja = subprocess.check_output(['command', '-v', 'ninja'])[:-1] # Construct the set of all targets all_targets = set() doto_targets = set() for t in subprocess.check_output([ninja, '-t', 'targets', 'all']).splitlines(): t, r = t.split(b':') all_targets.add(t) if r[:13] == b' C_COMPILER__' or r[:15] == b' CXX_COMPILER__': doto_targets.add(t) def parse_ninja_query(query): deps = {} lines = query.splitlines() while len(lines): line = lines.pop(0) if line[0] == ord(' '): continue # We have a new target target = line.split(b':')[0] assert lines.pop(0)[:8] == b' input:' inputs = set() while True: i = lines.pop(0) if i[:4] != b' ': break ''' ninja has 3 types of input: 1. Explicit dependencies, no prefix; 2. Implicit dependencies, | prefix. 3. Order only dependencies, || prefix. Order only dependency do not require the target to be rebuilt and so we ignore them. ''' i = i[4:] if i[0] == ord('|'): if i[1] == ord('|'): # We reached the order only dependencies. break i = i[2:] inputs.add(i) deps[target] = inputs return deps def extract_deps(workset): # Recursively extract the dependencies of the target. deps = {} while len(workset) > 0: query = subprocess.check_output([ninja, '-t', 'query'] + list(workset)) target_deps = parse_ninja_query(query) deps.update(target_deps) workset = set() for d in target_deps.values(): workset.update(t for t in d if t in all_targets and t not in deps) # Extract build time dependencies. bt_targets = [t for t in deps if t in doto_targets] if len(bt_targets) == 0: return deps ndeps = subprocess.check_output( [ninja, '-t', 'deps'] + bt_targets, stderr=subprocess.DEVNULL) lines = ndeps.splitlines() while len(lines) > 0: line = lines.pop(0) t, m = line.split(b':') if m == b' deps not found': continue inputs = set() while True: i = lines.pop(0) if i == b'': break assert i[:4] == b' ' inputs.add(i[4:]) deps[t] = inputs return deps base_dir = base_dir.encode() def rebase_deps(deps): rebased = {} cache = {} def rebase(path): if path in cache: return cache[path] abspath = os.path.abspath(path) newpath = path if path == abspath else os.path.relpath( abspath, base_dir) cache[path] = newpath return newpath for t, s in deps.items(): - rebased[rebase(t)] = set(rebase(d) for d in s) + rebased[rebase(t)] = {rebase(d) for d in s} return rebased deps = extract_deps(set(targets)) deps = rebase_deps(deps) def dump(deps): for t, d in deps.items(): if len(d) == 0: continue str = f"{t.decode()}: \\\n " - str += " \\\n ".join(sorted(map((lambda x: x.decode()), d))) + str += " \\\n ".join(sorted(x.decode() for x in d)) print(str) # Collapse everything under the base target. -basedeps = set() if extra_deps is None else set(d.encode() for d in extra_deps) +basedeps = set() if extra_deps is None else {d.encode() for d in extra_deps} for d in deps.values(): basedeps.update(d) base_target = base_target.encode() basedeps.discard(base_target) dump({base_target: basedeps}) diff --git a/contrib/devtools/circular-dependencies.py b/contrib/devtools/circular-dependencies.py index e6c2b5612..33dae68bd 100755 --- a/contrib/devtools/circular-dependencies.py +++ b/contrib/devtools/circular-dependencies.py @@ -1,94 +1,94 @@ #!/usr/bin/env python3 # Copyright (c) 2018-2020 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
import re import sys from typing import Dict, List, Set MAPPING = { 'core_read.cpp': 'core_io.cpp', 'core_write.cpp': 'core_io.cpp', } # Directories with header-based modules, where the assumption that .cpp files # define functions and variables declared in corresponding .h files is # incorrect. HEADER_MODULE_PATHS = [ 'interfaces/' ] def module_name(path): if path in MAPPING: path = MAPPING[path] if any(path.startswith(dirpath) for dirpath in HEADER_MODULE_PATHS): return path if path.endswith(".h"): return path[:-2] if path.endswith(".c"): return path[:-2] if path.endswith(".cpp"): return path[:-4] return None files = {} deps: Dict[str, Set[str]] = {} RE = re.compile("^#include <(.*)>") # Iterate over files, and create list of modules for arg in sys.argv[1:]: module = module_name(arg) if module is None: print(f"Ignoring file {arg} (does not constitute module)\n") else: files[arg] = module deps[module] = set() # Iterate again, and build list of direct dependencies for each module # TODO: implement support for multiple include directories for arg in sorted(files.keys()): module = files[arg] with open(arg, 'r', encoding="utf8") as f: for line in f: match = RE.match(line) if match: include = match.group(1) included_module = module_name(include) if included_module is not None and included_module in deps and included_module != module: deps[module].add(included_module) # Loop to find the shortest (remaining) circular dependency have_cycle = False while True: shortest_cycle = None for module in sorted(deps.keys()): # Build the transitive closure of dependencies of module closure: Dict[str, List[str]] = {dep: [] for dep in deps[module]} while True: old_size = len(closure) old_closure_keys = sorted(closure.keys()) for src in old_closure_keys: for dep in deps[src]: if dep not in closure: closure[dep] = closure[src] + [src] if len(closure) == old_size: break # If module is in its own transitive closure, it's a circular # dependency; check if it is the shortest if module in closure and (shortest_cycle is None or len( closure[module]) + 1 < len(shortest_cycle)): shortest_cycle = [module] + closure[module] if shortest_cycle is None: break # We have the shortest circular dependency; report it module = shortest_cycle[0] print(f"Circular dependency: {' -> '.join(shortest_cycle + [module])}") # And then break the dependency to avoid repeating in other cycles - deps[shortest_cycle[-1]] = deps[shortest_cycle[-1]] - set([module]) + deps[shortest_cycle[-1]] = deps[shortest_cycle[-1]] - {module} have_cycle = True sys.exit(1 if have_cycle else 0) diff --git a/src/bench/data/convert-raw-to-header.py b/src/bench/data/convert-raw-to-header.py index 97f0735dd..1a3de7071 100755 --- a/src/bench/data/convert-raw-to-header.py +++ b/src/bench/data/convert-raw-to-header.py @@ -1,21 +1,21 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin developers import sys def main(test_name, input_file): with open(input_file, "rb") as f: contents = f.read() print(f"static unsigned const char {test_name}_raw[] = {{") - print(", ".join(map(lambda x: f"0x{x:02x}", contents))) + print(", ".join(f"0x{x:02x}" for x in contents)) print("};") if __name__ == "__main__": if len(sys.argv) != 3: print("We need additional pylons!") sys.exit(1) main(sys.argv[1], sys.argv[2]) diff --git a/src/test/data/generate_asmap.py b/src/test/data/generate_asmap.py index 0847ca0c1..d691ac9f9 100755 --- a/src/test/data/generate_asmap.py +++ b/src/test/data/generate_asmap.py @@ -1,25 +1,25 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin 
developers import sys from pathlib import Path def main(input_file, output_file): with open(input_file, 'rb') as f: contents = f.read() with open(output_file, "w", encoding="utf-8") as f: f.write( f"static unsigned const char {Path(input_file).stem}_raw[] = {{\n") - f.write(", ".join(map(lambda x: f"0x{x:02x}", contents))) + f.write(", ".join(f"0x{x:02x}" for x in contents)) f.write("\n};\n") if __name__ == "__main__": if len(sys.argv) != 3: print("Invalid parameters\nUsage: {} input_file output_file".format( Path(sys.argv[0]).name)) sys.exit(1) main(sys.argv[1], sys.argv[2]) diff --git a/src/test/data/generate_header.py b/src/test/data/generate_header.py index 489ad113a..93083c7b0 100755 --- a/src/test/data/generate_header.py +++ b/src/test/data/generate_header.py @@ -1,24 +1,24 @@ #!/usr/bin/env python3 # Copyright (c) 2018-2019 The Bitcoin developers import sys def main(test_name, input_file): with open(input_file, "rb") as f: contents = f.read() print("#include \n") print("namespace json_tests {") print(f"static const uint8_t {test_name}[] = {{") - print(", ".join(map(lambda x: f"0x{x:02x}", contents))) + print(", ".join(f"0x{x:02x}" for x in contents)) print("};") print("};") if __name__ == "__main__": if len(sys.argv) != 3: print("We need additional pylons!") sys.exit(1) main(sys.argv[1], sys.argv[2]) diff --git a/test/functional/abc-mempool-coherence-on-activations.py b/test/functional/abc-mempool-coherence-on-activations.py index 39ba68698..612a3547f 100755 --- a/test/functional/abc-mempool-coherence-on-activations.py +++ b/test/functional/abc-mempool-coherence-on-activations.py @@ -1,369 +1,369 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This test checks the mempool coherence when changing validation rulesets, which happens on (de)activations of network upgrades (forks). We test the mempool coherence in 3 cases: 1) on activations, pre-fork-only transactions are evicted from the mempool, while always-valid transactions remain. 2) on deactivations, post-fork-only transactions (unconfirmed or once confirmed) are evicted from the mempool, while always-valid transactions are reincluded. 3) on a reorg to a chain that deactivates and reactivates the fork, post-fork-only and always-valid transactions (unconfirmed and/or once confirmed on the shorter chain) are kept or reincluded in the mempool. """ from test_framework.blocktools import ( create_block, create_coinbase, create_tx_with_script, make_conform_to_ctor, ) from test_framework.key import ECKey from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, ToHex from test_framework.p2p import P2PDataStore from test_framework.script import ( OP_CHECKSIG, OP_TRUE, SIGHASH_ALL, SIGHASH_FORKID, CScript, SignatureHashForkId, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error # ---Code specific to the activation used for this test--- # It might change depending on the activation code currently existing in the # client software. We use the replay protection activation for this test. 
ACTIVATION_TIME = 2000000000 EXTRA_ARG = f"-replayprotectionactivationtime={ACTIVATION_TIME}" # simulation starts before activation FIRST_BLOCK_TIME = ACTIVATION_TIME - 86400 # Expected RPC error when trying to send an activation specific spend txn. RPC_EXPECTED_ERROR = "mandatory-script-verify-flag-failed (Signature must be zero for failed CHECK(MULTI)SIG operation)" def create_fund_and_activation_specific_spending_tx(spend, pre_fork_only): # Creates 2 transactions: # 1) txfund: create outputs to be used by txspend. Must be valid pre-fork. # 2) txspend: spending transaction that is specific to the activation # being used and can be pre-fork-only or post-fork-only, depending on the # function parameter. # This specific implementation uses the replay protection mechanism to # create transactions that are only valid before or after the fork. # Generate a key pair to test private_key = ECKey() private_key.generate() public_key = private_key.get_pubkey().get_bytes() # Fund transaction script = CScript([public_key, OP_CHECKSIG]) txfund = create_tx_with_script( spend.tx, spend.n, b'', amount=50 * COIN, script_pub_key=script) txfund.rehash() # Activation specific spending tx txspend = CTransaction() txspend.vout.append(CTxOut(50 * COIN - 1000, CScript([OP_TRUE]))) txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b'')) # Sign the transaction # Use forkvalues that create pre-fork-only or post-fork-only # transactions. forkvalue = 0 if pre_fork_only else 0xffdead sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID sighash = SignatureHashForkId( script, txspend, 0, sighashtype, 50 * COIN) sig = private_key.sign_ecdsa(sighash) + \ bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID])) txspend.vin[0].scriptSig = CScript([sig]) txspend.rehash() return txfund, txspend def create_fund_and_pre_fork_only_tx(spend): return create_fund_and_activation_specific_spending_tx( spend, pre_fork_only=True) def create_fund_and_post_fork_only_tx(spend): return create_fund_and_activation_specific_spending_tx( spend, pre_fork_only=False) # ---Mempool coherence on activations test--- class PreviousSpendableOutput(object): def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n class MempoolCoherenceOnActivationsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.block_heights = {} self.tip = None self.blocks = {} self.extra_args = [[ '-whitelist=noban@127.0.0.1', EXTRA_ARG, '-acceptnonstdtxn=1', '-automaticunparking=1', ]] def next_block(self, number): if self.tip is None: base_block_hash = self.genesis_hash block_time = FIRST_BLOCK_TIME else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height) coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) # Do PoW, which is cheap on regnet block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block def run_test(self): node = self.nodes[0] peer = node.add_p2p_connection(P2PDataStore()) node.setmocktime(ACTIVATION_TIME) self.genesis_hash = int(node.getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 spendable_outputs = [] # save the current tip so it can be spent by a later block def save_spendable_output(): spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(): return 
PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) # adds transactions to the block and updates state def update_block(block_number, new_transactions): block = self.blocks[block_number] block.vtx.extend(new_transactions) old_sha256 = block.sha256 make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: self.block_heights[ block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block # send a txn to the mempool and check it was accepted def send_transaction_to_mempool(tx): tx_id = node.sendrawtransaction(ToHex(tx)) assert tx_id in node.getrawmempool() # checks the mempool has exactly the same txns as in the provided list def check_mempool_equal(txns): - assert set(node.getrawmempool()) == set(tx.hash for tx in txns) + assert set(node.getrawmempool()) == {tx.hash for tx in txns} # Create an always-valid chained transaction. It spends a # scriptPub=OP_TRUE coin into another. Returns the transaction and its # spendable output for further chaining. def create_always_valid_chained_tx(spend): tx = create_tx_with_script( spend.tx, spend.n, b'', amount=spend.tx.vout[0].nValue - 1000, script_pub_key=CScript([OP_TRUE])) tx.rehash() return tx, PreviousSpendableOutput(tx, 0) # shorthand block = self.next_block # Create a new block block(0) save_spendable_output() peer.send_blocks_and_test([self.tip], node) # Now we need that block to mature so we can spend the coinbase. maturity_blocks = [] for i in range(110): block(5000 + i) maturity_blocks.append(self.tip) save_spendable_output() peer.send_blocks_and_test(maturity_blocks, node) # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(100): out.append(get_spendable_output()) # Create 2 pre-fork-only txns (tx_pre0, tx_pre1). Fund txns are valid # pre-fork, so we can mine them right away. txfund0, tx_pre0 = create_fund_and_pre_fork_only_tx(out[0]) txfund1, tx_pre1 = create_fund_and_pre_fork_only_tx(out[1]) # Create 2 post-fork-only txns (tx_post0, tx_post1). Fund txns are # valid pre-fork, so we can mine them right away. txfund2, tx_post0 = create_fund_and_post_fork_only_tx(out[2]) txfund3, tx_post1 = create_fund_and_post_fork_only_tx(out[3]) # Create blocks to activate the fork. Mine all funding transactions. bfork = block(5555) bfork.nTime = ACTIVATION_TIME - 1 update_block(5555, [txfund0, txfund1, txfund2, txfund3]) peer.send_blocks_and_test([self.tip], node) for i in range(5): peer.send_blocks_and_test([block(5200 + i)], node) # Check we are just before the activation time assert_equal( node.getblockchaininfo()['mediantime'], ACTIVATION_TIME - 1) # We are just before the fork. Pre-fork-only and always-valid chained # txns (tx_chain0, tx_chain1) are valid, post-fork-only txns are # rejected. send_transaction_to_mempool(tx_pre0) send_transaction_to_mempool(tx_pre1) tx_chain0, last_chained_output = create_always_valid_chained_tx(out[4]) tx_chain1, last_chained_output = create_always_valid_chained_tx( last_chained_output) send_transaction_to_mempool(tx_chain0) send_transaction_to_mempool(tx_chain1) assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR, node.sendrawtransaction, ToHex(tx_post0)) assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR, node.sendrawtransaction, ToHex(tx_post1)) check_mempool_equal([tx_chain0, tx_chain1, tx_pre0, tx_pre1]) # Activate the fork. 
Mine the 1st always-valid chained txn and a # pre-fork-only txn. block(5556) update_block(5556, [tx_chain0, tx_pre0]) peer.send_blocks_and_test([self.tip], node) forkblockid = node.getbestblockhash() # Check we just activated the fork assert_equal(node.getblockheader(forkblockid)['mediantime'], ACTIVATION_TIME) # Check mempool coherence when activating the fork. Pre-fork-only txns # were evicted from the mempool, while always-valid txns remain. # Evicted: tx_pre1 check_mempool_equal([tx_chain1]) # Post-fork-only and always-valid txns are accepted, pre-fork-only txn # are rejected. send_transaction_to_mempool(tx_post0) send_transaction_to_mempool(tx_post1) tx_chain2, _ = create_always_valid_chained_tx(last_chained_output) send_transaction_to_mempool(tx_chain2) assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR, node.sendrawtransaction, ToHex(tx_pre1)) check_mempool_equal([tx_chain1, tx_chain2, tx_post0, tx_post1]) # Mine the 2nd always-valid chained txn and a post-fork-only txn. block(5557) update_block(5557, [tx_chain1, tx_post0]) peer.send_blocks_and_test([self.tip], node) postforkblockid = node.getbestblockhash() # The mempool contains the 3rd chained txn and a post-fork-only txn. check_mempool_equal([tx_chain2, tx_post1]) # In the following we will testing block disconnections and reorgs. # - tx_chain2 will always be retained in the mempool since it is always # valid. Its continued presence shows that we are never simply # clearing the entire mempool. # - tx_post1 may be evicted from mempool if we land before the fork. # - tx_post0 is in a block and if 'de-mined', it will either be evicted # or end up in mempool depending if we land before/after the fork. # - tx_pre0 is in a block and if 'de-mined', it will either be evicted # or end up in mempool depending if we land after/before the fork. # First we do a disconnection of the post-fork block, which is a # normal disconnection that merely returns the block contents into # the mempool -- nothing is lost. node.invalidateblock(postforkblockid) # In old mempool: tx_chain2, tx_post1 # Recovered from blocks: tx_chain1 and tx_post0. # Lost from blocks: NONE # Retained from old mempool: tx_chain2, tx_post1 # Evicted from old mempool: NONE check_mempool_equal([tx_chain1, tx_chain2, tx_post0, tx_post1]) # Now, disconnect the fork block. This is a special disconnection # that requires reprocessing the mempool due to change in rules. node.invalidateblock(forkblockid) # In old mempool: tx_chain1, tx_chain2, tx_post0, tx_post1 # Recovered from blocks: tx_chain0, tx_pre0 # Lost from blocks: NONE # Retained from old mempool: tx_chain1, tx_chain2 # Evicted from old mempool: tx_post0, tx_post1 check_mempool_equal([tx_chain0, tx_chain1, tx_chain2, tx_pre0]) # Restore state node.reconsiderblock(postforkblockid) node.reconsiderblock(forkblockid) send_transaction_to_mempool(tx_post1) check_mempool_equal([tx_chain2, tx_post1]) # Test a reorg that crosses the fork. # If such a reorg happens, most likely it will both start *and end* # after the fork. We will test such a case here and make sure that # post-fork-only transactions are not unnecessarily discarded from # the mempool in such a reorg. Pre-fork-only transactions however can # get lost. # Set up a longer competing chain that doesn't confirm any of our txns. # This starts after 5204, so it contains neither the forkblockid nor # the postforkblockid from above. 
self.tip = self.blocks[5204] reorg_blocks = [] for i in range(3): reorg_blocks.append(block(5900 + i)) # Perform the reorg peer.send_blocks_and_test(reorg_blocks, node) # reorg finishes after the fork assert_equal( node.getblockchaininfo()['mediantime'], ACTIVATION_TIME + 2) # In old mempool: tx_chain2, tx_post1 # Recovered from blocks: tx_chain0, tx_chain1, tx_post0 # Lost from blocks: tx_pre0 # Retained from old mempool: tx_chain2, tx_post1 # Evicted from old mempool: NONE check_mempool_equal( [tx_chain0, tx_chain1, tx_chain2, tx_post0, tx_post1]) if __name__ == '__main__': MempoolCoherenceOnActivationsTest().main() diff --git a/test/functional/abc-schnorr.py b/test/functional/abc-schnorr.py index 364c61779..661348944 100755 --- a/test/functional/abc-schnorr.py +++ b/test/functional/abc-schnorr.py @@ -1,239 +1,239 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This tests the treatment of Schnorr transaction signatures: - acceptance both in mempool and blocks. - check banning for peers who send txns with 64 byte ECDSA DER sigs. Derived from a variety of functional tests. """ from test_framework.blocktools import ( create_block, create_coinbase, create_tx_with_script, make_conform_to_ctor, ) from test_framework.key import ECKey from test_framework.messages import ( CBlock, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex, ) from test_framework.p2p import P2PDataStore from test_framework.script import ( OP_1, OP_CHECKMULTISIG, OP_CHECKSIG, OP_TRUE, SIGHASH_ALL, SIGHASH_FORKID, CScript, SignatureHashForkId, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_raises_rpc_error # A mandatory (bannable) error occurs when people pass Schnorr signatures # into OP_CHECKMULTISIG. SCHNORR_MULTISIG_ERROR = 'mandatory-script-verify-flag-failed (Signature cannot be 65 bytes in CHECKMULTISIG)' # A mandatory (bannable) error occurs when people send invalid Schnorr # sigs into OP_CHECKSIG. NULLFAIL_ERROR = 'mandatory-script-verify-flag-failed (Signature must be zero for failed CHECK(MULTI)SIG operation)' # Blocks with invalid scripts give this error: BADINPUTS_ERROR = 'blk-bad-inputs' # This 64-byte signature is used to test exclusion & banning according to # the above error messages. # Tests of real 64 byte ECDSA signatures can be found in script_tests. sig64 = b'\0' * 64 class SchnorrTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.block_heights = {} self.extra_args = [[ "-acceptnonstdtxn=1"]] def reconnect_p2p(self): """Tear down and bootstrap the P2P connection to the node. The node gets disconnected several times in this test. This helper method reconnects the p2p and restarts the network thread.""" self.nodes[0].disconnect_p2ps() self.nodes[0].add_p2p_connection(P2PDataStore()) def getbestblock(self, node): """Get the best block. Register its height so we can use build_block.""" block_height = node.getblockcount() blockhash = node.getblockhash(block_height) block = FromHex(CBlock(), node.getblock(blockhash, 0)) block.calc_sha256() self.block_heights[block.sha256] = block_height return block def build_block(self, parent, transactions=(), nTime=None): """Make a new block with an OP_1 coinbase output. 
Requires parent to have its height registered.""" parent.calc_sha256() block_height = self.block_heights[parent.sha256] + 1 block_time = (parent.nTime + 1) if nTime is None else nTime block = create_block( parent.sha256, create_coinbase(block_height), block_time) block.vtx.extend(transactions) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.block_heights[block.sha256] = block_height return block def check_for_ban_on_rejected_tx(self, tx, reject_reason=None): """Check we are disconnected when sending a txn that the node rejects. (Can't actually get banned, since bitcoind won't ban local peers.)""" self.nodes[0].p2ps[0].send_txs_and_test( [tx], self.nodes[0], success=False, reject_reason=reject_reason, expect_disconnect=True) self.reconnect_p2p() def check_for_ban_on_rejected_block(self, block, reject_reason=None): """Check we are disconnected when sending a block that the node rejects. (Can't actually get banned, since bitcoind won't ban local peers.)""" self.nodes[0].p2ps[0].send_blocks_and_test( [block], self.nodes[0], success=False, reject_reason=reject_reason, expect_disconnect=True) self.reconnect_p2p() def run_test(self): node, = self.nodes self.nodes[0].add_p2p_connection(P2PDataStore()) tip = self.getbestblock(node) self.log.info("Create some blocks with OP_1 coinbase for spending.") blocks = [] for _ in range(10): tip = self.build_block(tip) blocks.append(tip) node.p2ps[0].send_blocks_and_test(blocks, node, success=True) spendable_outputs = [block.vtx[0] for block in blocks] self.log.info("Mature the blocks and get out of IBD.") self.generate(node, 100, sync_fun=self.no_op) tip = self.getbestblock(node) self.log.info("Setting up spends to test and mining the fundings.") fundings = [] # Generate a key pair private_key = ECKey() private_key.set(b"Schnorr!" * 4, True) # get uncompressed public key serialization public_key = private_key.get_pubkey().get_bytes() def create_fund_and_spend_tx(multi=False, sig='schnorr'): spendfrom = spendable_outputs.pop() if multi: script = CScript([OP_1, public_key, OP_1, OP_CHECKMULTISIG]) else: script = CScript([public_key, OP_CHECKSIG]) value = spendfrom.vout[0].nValue # Fund transaction txfund = create_tx_with_script( spendfrom, 0, b'', amount=value, script_pub_key=script) txfund.rehash() fundings.append(txfund) # Spend transaction txspend = CTransaction() txspend.vout.append( CTxOut(value - 1000, CScript([OP_TRUE]))) txspend.vin.append( CTxIn(COutPoint(txfund.sha256, 0), b'')) # Sign the transaction sighashtype = SIGHASH_ALL | SIGHASH_FORKID hashbyte = bytes([sighashtype & 0xff]) sighash = SignatureHashForkId( script, txspend, 0, sighashtype, value) if sig == 'schnorr': txsig = private_key.sign_schnorr(sighash) + hashbyte elif sig == 'ecdsa': txsig = private_key.sign_ecdsa(sighash) + hashbyte elif isinstance(sig, bytes): txsig = sig + hashbyte if multi: txspend.vin[0].scriptSig = CScript([b'', txsig]) else: txspend.vin[0].scriptSig = CScript([txsig]) txspend.rehash() return txspend schnorrchecksigtx = create_fund_and_spend_tx() schnorrmultisigtx = create_fund_and_spend_tx(multi=True) ecdsachecksigtx = create_fund_and_spend_tx(sig='ecdsa') sig64checksigtx = create_fund_and_spend_tx(sig=sig64) sig64multisigtx = create_fund_and_spend_tx(multi=True, sig=sig64) tip = self.build_block(tip, fundings) node.p2ps[0].send_blocks_and_test([tip], node) self.log.info("Typical ECDSA and Schnorr CHECKSIG are valid.") node.p2ps[0].send_txs_and_test( [schnorrchecksigtx, ecdsachecksigtx], node) # They get mined as usual. 
self.generate(node, 1, sync_fun=self.no_op) tip = self.getbestblock(node) # Make sure they are in the block, and mempool is now empty. - txhashes = set([schnorrchecksigtx.hash, ecdsachecksigtx.hash]) + txhashes = {schnorrchecksigtx.hash, ecdsachecksigtx.hash} assert txhashes.issubset(tx.rehash() for tx in tip.vtx) assert not node.getrawmempool() self.log.info("Schnorr in multisig is rejected with mandatory error.") assert_raises_rpc_error(-26, SCHNORR_MULTISIG_ERROR, node.sendrawtransaction, ToHex(schnorrmultisigtx)) # And it is banworthy. self.check_for_ban_on_rejected_tx( schnorrmultisigtx, SCHNORR_MULTISIG_ERROR) # And it can't be mined self.check_for_ban_on_rejected_block( self.build_block(tip, [schnorrmultisigtx]), BADINPUTS_ERROR) self.log.info("Bad 64-byte sig is rejected with mandatory error.") # In CHECKSIG it's invalid Schnorr and hence NULLFAIL. assert_raises_rpc_error(-26, NULLFAIL_ERROR, node.sendrawtransaction, ToHex(sig64checksigtx)) # In CHECKMULTISIG it's invalid length and hence BAD_LENGTH. assert_raises_rpc_error(-26, SCHNORR_MULTISIG_ERROR, node.sendrawtransaction, ToHex(sig64multisigtx)) # Sending these transactions is banworthy. self.check_for_ban_on_rejected_tx(sig64checksigtx, NULLFAIL_ERROR) self.check_for_ban_on_rejected_tx( sig64multisigtx, SCHNORR_MULTISIG_ERROR) # And they can't be mined either... self.check_for_ban_on_rejected_block( self.build_block(tip, [sig64checksigtx]), BADINPUTS_ERROR) self.check_for_ban_on_rejected_block( self.build_block(tip, [sig64multisigtx]), BADINPUTS_ERROR) if __name__ == '__main__': SchnorrTest().main() diff --git a/test/functional/abc_feature_proof_cleanup.py b/test/functional/abc_feature_proof_cleanup.py index 321676d42..de68bda69 100644 --- a/test/functional/abc_feature_proof_cleanup.py +++ b/test/functional/abc_feature_proof_cleanup.py @@ -1,134 +1,136 @@ #!/usr/bin/env python3 # Copyright (c) 2022 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
""" Test the dangling proofs cleanup """ import time from test_framework.avatools import ( gen_proof, get_ava_p2p_interface, get_ava_p2p_interface_no_handshake, get_proof_ids, wait_for_proof, ) from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, uint256_hex from test_framework.wallet_util import bytes_to_wif # Interval between 2 proof cleanups AVALANCHE_CLEANUP_INTERVAL = 5 * 60 # Dangling proof timeout AVALANCHE_DANGLING_PROOF_TIMEOUT = 15 * 60 class ProofsCleanupTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [[ '-avaproofstakeutxodustthreshold=1000000', '-avaproofstakeutxoconfirmations=1', # Get rid of the getdata delay penalty for inbounds '-whitelist=noban@127.0.0.1', ]] * self.num_nodes def run_test(self): node = self.nodes[0] master_key, local_proof = gen_proof(self, node) self.restart_node(0, self.extra_args[0] + [ f"-avaproof={local_proof.serialize().hex()}", f"-avamasterkey={bytes_to_wif(master_key.get_bytes())}", ]) # Add an inbound so the node proof can be registered and advertised node.add_p2p_connection(P2PInterface()) self.generate(node, 1, sync_fun=self.no_op) wait_for_proof(node, uint256_hex(local_proof.proofid)) mocktime = int(time.time()) node.setmocktime(mocktime) proofs = [local_proof] keys = [master_key] peers = [] # The first 5 peers have a node attached for _ in range(5): peer = get_ava_p2p_interface(self, node) proofs.append(peer.proof) keys.append(peer.master_privkey) peers.append(peer) # The last 5 peers have no node attached for _ in range(5): _, proof = gen_proof(self, node) node.sendavalancheproof(proof.serialize().hex()) proofs.append(proof) peer_info = node.getavalanchepeerinfo() assert_equal(len(peer_info), 11) assert_equal(set(get_proof_ids(node)), - set([proof.proofid for proof in proofs])) + {proof.proofid for proof in proofs}) self.log.info("No proof is cleaned before the timeout expires") mocktime += AVALANCHE_DANGLING_PROOF_TIMEOUT - 1 node.setmocktime(mocktime) # Run the cleanup, the proofs are still there node.mockscheduler(AVALANCHE_CLEANUP_INTERVAL) assert_equal(len(peer_info), 11) self.log.info("Check the proofs with attached nodes are not cleaned") # Run the cleanup, the proofs with no node are cleaned excepted our # local proof with node.assert_debug_log([f"Proof dropped for dangling too long (no connected node): {uint256_hex(p.proofid)}" for p in proofs[6:]]): # Expire the dangling proof timeout mocktime += 1 node.setmocktime(mocktime) node.mockscheduler(AVALANCHE_CLEANUP_INTERVAL) - self.wait_until(lambda: set(get_proof_ids(node)) == set( - [proof.proofid for proof in proofs[:6]]), timeout=5) + self.wait_until( + lambda: set(get_proof_ids(node)) == { + proof.proofid for proof in proofs[:6]}, + timeout=5) self.log.info( "Check the proofs are cleaned on next cleanup after the nodes disconnected") for peer in peers: peer.peer_disconnect() peer.wait_for_disconnect() node.mockscheduler(AVALANCHE_CLEANUP_INTERVAL) self.wait_until(lambda: get_proof_ids(node) == [local_proof.proofid]) self.log.info("Check the cleaned up proofs are no longer accepted...") sender = get_ava_p2p_interface_no_handshake(node) for proof in proofs[1:]: with node.assert_debug_log(["dangling-proof"]): sender.send_avaproof(proof) assert_raises_rpc_error(-8, "dangling-proof", node.sendavalancheproof, proof.serialize().hex()) assert_equal(get_proof_ids(node), [local_proof.proofid]) self.log.info("...until there 
is a node to attach") node.disconnect_p2ps() assert_equal(len(node.p2ps), 0) avanode = get_ava_p2p_interface(self, node) avanode.wait_until(lambda: avanode.last_message.get( "getdata") and avanode.last_message["getdata"].inv[-1].hash == avanode.proof.proofid) avanode.send_avaproof(avanode.proof) self.wait_until(lambda: avanode.proof.proofid in get_proof_ids(node)) if __name__ == '__main__': ProofsCleanupTest().main() diff --git a/test/functional/abc_p2p_compactproofs.py b/test/functional/abc_p2p_compactproofs.py index 27009e16f..a07c4003e 100644 --- a/test/functional/abc_p2p_compactproofs.py +++ b/test/functional/abc_p2p_compactproofs.py @@ -1,684 +1,684 @@ #!/usr/bin/env python3 # Copyright (c) 2022 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test proof inventory relaying """ import random import time from test_framework.avatools import ( AvaP2PInterface, NoHandshakeAvaP2PInterface, build_msg_avaproofs, gen_proof, get_ava_p2p_interface, get_proof_ids, wait_for_proof, ) from test_framework.messages import ( NODE_AVALANCHE, NODE_NETWORK, AvalanchePrefilledProof, calculate_shortid, msg_avaproofsreq, msg_getavaproofs, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import MAX_NODES, assert_equal, p2p_port, uint256_hex # Timeout after which the proofs can be cleaned up AVALANCHE_AVAPROOFS_TIMEOUT = 2 * 60 # Max interval between 2 periodic networking processing AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL = 5 * 60 class ProofStoreP2PInterface(AvaP2PInterface): def __init__(self): self.proofs = [] super().__init__() def on_avaproof(self, message): self.proofs.append(message.proof) def get_proofs(self): with p2p_lock: return self.proofs class CompactProofsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [[ '-avaproofstakeutxodustthreshold=1000000', '-avaproofstakeutxoconfirmations=1', '-avacooldown=0', ]] * self.num_nodes def setup_network(self): # Don't connect the nodes self.setup_nodes() @staticmethod def received_avaproofs(peer): with p2p_lock: return peer.last_message.get("avaproofs") def test_send_outbound_getavaproofs(self): self.log.info( "Check we send a getavaproofs message to our avalanche outbound peers") node = self.nodes[0] p2p_idx = 0 non_avapeers = [] for _ in range(4): peer = P2PInterface() node.add_outbound_p2p_connection( peer, p2p_idx=p2p_idx, connection_type="outbound-full-relay", services=NODE_NETWORK, ) non_avapeers.append(peer) p2p_idx += 1 inbound_avapeers = [ node.add_p2p_connection( NoHandshakeAvaP2PInterface()) for _ in range(4)] outbound_avapeers = [] # With a proof and the service bit set for _ in range(4): peer = AvaP2PInterface(self, node) node.add_outbound_p2p_connection( peer, p2p_idx=p2p_idx, connection_type="avalanche", services=NODE_NETWORK | NODE_AVALANCHE, ) outbound_avapeers.append(peer) p2p_idx += 1 # Without a proof and no service bit set for _ in range(4): peer = AvaP2PInterface() node.add_outbound_p2p_connection( peer, p2p_idx=p2p_idx, connection_type="outbound-full-relay", services=NODE_NETWORK, ) outbound_avapeers.append(peer) p2p_idx += 1 def all_peers_received_getavaproofs(): with p2p_lock: return all([p.last_message.get("getavaproofs") for p in outbound_avapeers]) self.wait_until(all_peers_received_getavaproofs) with p2p_lock: assert all([p.message_count.get( "getavaproofs", 0) >= 1 for p in 
outbound_avapeers]) assert all([p.message_count.get( "getavaproofs", 0) == 0 for p in non_avapeers]) assert all([p.message_count.get( "getavaproofs", 0) == 0 for p in inbound_avapeers]) self.log.info( "Check we send periodic getavaproofs message to some of our peers") def count_outbounds_getavaproofs(): with p2p_lock: return sum([p.message_count.get("getavaproofs", 0) for p in outbound_avapeers]) outbounds_getavaproofs = count_outbounds_getavaproofs() node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL) self.wait_until(lambda: count_outbounds_getavaproofs() == outbounds_getavaproofs + 3) outbounds_getavaproofs += 3 with p2p_lock: assert all([p.message_count.get( "getavaproofs", 0) == 0 for p in non_avapeers]) assert all([p.message_count.get( "getavaproofs", 0) == 0 for p in inbound_avapeers]) self.log.info( "After the first avaproofs has been received, all the peers are requested periodically") responding_outbound_avapeer = AvaP2PInterface(self, node) node.add_outbound_p2p_connection( responding_outbound_avapeer, p2p_idx=p2p_idx, connection_type="avalanche", services=NODE_NETWORK | NODE_AVALANCHE, ) p2p_idx += 1 outbound_avapeers.append(responding_outbound_avapeer) self.wait_until(all_peers_received_getavaproofs) _, proof = gen_proof(self, node) # Send the avaproofs message avaproofs = build_msg_avaproofs([proof]) responding_outbound_avapeer.send_and_ping(avaproofs) # Now the node will request from all its peers at each time period outbounds_getavaproofs = count_outbounds_getavaproofs() num_outbound_avapeers = len(outbound_avapeers) node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL) self.wait_until(lambda: count_outbounds_getavaproofs() == outbounds_getavaproofs + num_outbound_avapeers) outbounds_getavaproofs += num_outbound_avapeers self.log.info("Empty avaproofs will not trigger any request") for p in outbound_avapeers: p.send_message(build_msg_avaproofs([])) with p2p_lock: # Only this peer actually sent a proof assert_equal( responding_outbound_avapeer.message_count.get( "avaproofsreq", 0), 1) assert_equal(sum([p.message_count.get("avaproofsreq", 0) for p in outbound_avapeers]), 1) # Sanity checks assert all([p.message_count.get( "getavaproofs", 0) == 0 for p in non_avapeers]) assert all([p.message_count.get( "getavaproofs", 0) == 0 for p in inbound_avapeers]) def test_send_manual_getavaproofs(self): self.log.info( "Check we send a getavaproofs message to our manually connected peers that support avalanche") node = self.nodes[0] # Get rid of previously connected nodes node.disconnect_p2ps() def added_node_connected(ip_port): added_node_info = node.getaddednodeinfo(ip_port) return len( added_node_info) == 1 and added_node_info[0]['connected'] def connect_callback(address, port): self.log.debug(f"Connecting to {address}:{port}") p = AvaP2PInterface(self, node) p2p_idx = 1 p.peer_accept_connection( connect_cb=connect_callback, connect_id=p2p_idx, net=node.chain, timeout_factor=node.timeout_factor, services=NODE_NETWORK | NODE_AVALANCHE, )() ip_port = f"127.0.0.1:{p2p_port(MAX_NODES - p2p_idx)}" node.addnode(node=ip_port, command="add") self.wait_until(lambda: added_node_connected(ip_port)) assert_equal(node.getpeerinfo()[-1]['addr'], ip_port) assert_equal(node.getpeerinfo()[-1]['connection_type'], 'manual') # Make sure p.is_connected is set, otherwise the last_message check # below will assert. 
p.wait_for_connect() p.wait_until(lambda: p.last_message.get("getavaproofs")) def test_respond_getavaproofs(self): self.log.info("Check the node responds to getavaproofs messages") node = self.nodes[0] def send_getavaproof_check_shortid_len(peer, expected_len): peer.send_message(msg_getavaproofs()) self.wait_until(lambda: self.received_avaproofs(peer)) avaproofs = self.received_avaproofs(peer) assert_equal(len(avaproofs.shortids), expected_len) # Initially the node has 0 peer self.restart_node(0) assert_equal(len(get_proof_ids(node)), 0) peer = node.add_p2p_connection(NoHandshakeAvaP2PInterface()) send_getavaproof_check_shortid_len(peer, 0) # Add some proofs sending_peer = node.add_p2p_connection(NoHandshakeAvaP2PInterface()) for _ in range(50): _, proof = gen_proof(self, node) sending_peer.send_avaproof(proof) wait_for_proof(node, uint256_hex(proof.proofid)) proofids = get_proof_ids(node) assert_equal(len(proofids), 50) receiving_peer = node.add_p2p_connection(NoHandshakeAvaP2PInterface()) send_getavaproof_check_shortid_len(receiving_peer, len(proofids)) avaproofs = self.received_avaproofs(receiving_peer) expected_shortids = [ calculate_shortid( avaproofs.key0, avaproofs.key1, proofid) for proofid in sorted(proofids)] assert_equal(expected_shortids, avaproofs.shortids) # Don't expect any prefilled proof for now assert_equal(len(avaproofs.prefilled_proofs), 0) def test_request_missing_proofs(self): self.log.info( "Check the node requests the missing proofs after receiving an avaproofs message") node = self.nodes[0] self.restart_node(0) key0 = random.randint(0, 2**64 - 1) key1 = random.randint(0, 2**64 - 1) proofs = [gen_proof(self, node)[1] for _ in range(10)] # Build a map from proofid to shortid. Use sorted proofids so we don't # have the same indices than the `proofs` list. 
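The comment above leans on dict insertion order: because the map built just below is populated from sorted(proofids), iterating it later (for example with enumerate()) walks the proofids in sorted order, which is deliberately not the order of the `proofs` list. A minimal standalone illustration of that property, using toy values rather than the test's own data:

    # Toy values only: dicts preserve insertion order (Python 3.7+), so a map
    # built from sorted keys is iterated in sorted-key order by enumerate().
    proof_ids = [7, 3, 9]                      # order of the hypothetical `proofs` list
    short_map = {pid: hex(pid) for pid in sorted(proof_ids)}
    assert list(enumerate(short_map)) == [(0, 3), (1, 7), (2, 9)]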
proofids = [p.proofid for p in proofs] shortid_map = {} for proofid in sorted(proofids): shortid_map[proofid] = calculate_shortid(key0, key1, proofid) self.log.info("The node ignores unsollicited avaproofs") spam_peer = get_ava_p2p_interface(self, node) msg = build_msg_avaproofs( proofs, prefilled_proofs=[], key_pair=[ key0, key1]) with node.assert_debug_log(["Ignoring unsollicited avaproofs"]): spam_peer.send_message(msg) def received_avaproofsreq(peer): with p2p_lock: return peer.last_message.get("avaproofsreq") p2p_idx = 0 def add_avalanche_p2p_outbound(): nonlocal p2p_idx peer = AvaP2PInterface(self, node) node.add_outbound_p2p_connection( peer, p2p_idx=p2p_idx, connection_type="avalanche", services=NODE_NETWORK | NODE_AVALANCHE, ) p2p_idx += 1 peer.wait_until(lambda: peer.last_message.get("getavaproofs")) return peer def expect_indices(shortids, expected_indices, prefilled_proofs=None): nonlocal p2p_idx msg = build_msg_avaproofs( [], prefilled_proofs=prefilled_proofs, key_pair=[ key0, key1]) msg.shortids = shortids peer = add_avalanche_p2p_outbound() peer.send_message(msg) self.wait_until(lambda: received_avaproofsreq(peer)) avaproofsreq = received_avaproofsreq(peer) assert_equal(avaproofsreq.indices, expected_indices) self.log.info("Check no proof is requested if there is no shortid") msg = build_msg_avaproofs([]) sender = add_avalanche_p2p_outbound() with node.assert_debug_log(["Got an avaproofs message with no shortid"]): sender.send_message(msg) # Make sure we don't get an avaproofsreq message sender.sync_send_with_ping() with p2p_lock: assert_equal(sender.message_count.get("avaproofsreq", 0), 0) self.log.info( "Check the node requests all the proofs if it known none") expect_indices( list(shortid_map.values()), - [i for i in range(len(shortid_map))] + list(range(len(shortid_map))) ) self.log.info( "Check the node requests only the missing proofs") known_proofids = [] for proof in proofs[:5]: node.sendavalancheproof(proof.serialize().hex()) known_proofids.append(proof.proofid) expected_indices = [i for i, proofid in enumerate( shortid_map) if proofid not in known_proofids] expect_indices(list(shortid_map.values()), expected_indices) self.log.info( "Check the node don't request prefilled proofs") # Get the indices for a couple of proofs indice_proof5 = list(shortid_map.keys()).index(proofids[5]) indice_proof6 = list(shortid_map.keys()).index(proofids[6]) prefilled_proofs = [ AvalanchePrefilledProof(indice_proof5, proofs[5]), AvalanchePrefilledProof(indice_proof6, proofs[6]), ] prefilled_proofs = sorted( prefilled_proofs, key=lambda prefilled_proof: prefilled_proof.index) remaining_shortids = [shortid for proofid, shortid in shortid_map.items( ) if proofid not in proofids[5:7]] known_proofids.extend(proofids[5:7]) expected_indices = [i for i, proofid in enumerate( shortid_map) if proofid not in known_proofids] expect_indices( remaining_shortids, expected_indices, prefilled_proofs=prefilled_proofs) self.log.info( "Check the node requests no proof if it knows all of them") for proof in proofs[5:]: node.sendavalancheproof(proof.serialize().hex()) known_proofids.append(proof.proofid) expect_indices(list(shortid_map.values()), []) self.log.info("Check out of bounds index") bad_peer = add_avalanche_p2p_outbound() msg = build_msg_avaproofs([], prefilled_proofs=[ AvalanchePrefilledProof( len(shortid_map) + 1, gen_proof(self, node)[1])], key_pair=[key0, key1]) msg.shortids = list(shortid_map.values()) with node.assert_debug_log(["Misbehaving", "avaproofs-bad-indexes"]): 
bad_peer.send_message(msg) bad_peer.wait_for_disconnect() self.log.info("An invalid prefilled proof will trigger a ban") _, no_stake = gen_proof(self, node) no_stake.stakes = [] bad_peer = add_avalanche_p2p_outbound() msg = build_msg_avaproofs([], prefilled_proofs=[ AvalanchePrefilledProof(len(shortid_map), no_stake), ], key_pair=[key0, key1]) msg.shortids = list(shortid_map.values()) with node.assert_debug_log(["Misbehaving", "invalid-proof"]): bad_peer.send_message(msg) bad_peer.wait_for_disconnect() def test_send_missing_proofs(self): self.log.info("Check the node respond to missing proofs requests") node = self.nodes[0] self.restart_node(0) numof_proof = 10 proofs = [gen_proof(self, node)[1] for _ in range(numof_proof)] for proof in proofs: node.sendavalancheproof(proof.serialize().hex()) proofids = get_proof_ids(node) assert all(proof.proofid in proofids for proof in proofs) self.log.info("Unsollicited requests are ignored") peer = node.add_p2p_connection(ProofStoreP2PInterface()) peer.send_and_ping(msg_avaproofsreq()) assert_equal(len(peer.get_proofs()), 0) def request_proofs(peer): peer.send_message(msg_getavaproofs()) self.wait_until(lambda: self.received_avaproofs(peer)) avaproofs = self.received_avaproofs(peer) assert_equal(len(avaproofs.shortids), numof_proof) return avaproofs _ = request_proofs(peer) self.log.info("Sending an empty request has no effect") peer.send_and_ping(msg_avaproofsreq()) assert_equal(len(peer.get_proofs()), 0) self.log.info("Check the requested proofs are sent by the node") def check_received_proofs(indices): requester = node.add_p2p_connection(ProofStoreP2PInterface()) avaproofs = request_proofs(requester) req = msg_avaproofsreq() req.indices = indices requester.send_message(req) # Check we got the expected number of proofs self.wait_until( lambda: len( requester.get_proofs()) == len(indices)) # Check we got the expected proofs received_shortids = [ calculate_shortid( avaproofs.key0, avaproofs.key1, proof.proofid) for proof in requester.get_proofs()] assert_equal(set(received_shortids), - set([avaproofs.shortids[i] for i in indices])) + {avaproofs.shortids[i] for i in indices}) # Only the first proof check_received_proofs([0]) # Only the last proof check_received_proofs([numof_proof - 1]) # Half first check_received_proofs(range(0, numof_proof // 2)) # Half last check_received_proofs(range(numof_proof // 2, numof_proof)) # Even check_received_proofs([i for i in range(numof_proof) if i % 2 == 0]) # Odds check_received_proofs([i for i in range(numof_proof) if i % 2 == 1]) # All check_received_proofs(range(numof_proof)) self.log.info( "Check the node will not send the proofs if not requested before the timeout elapsed") # Disconnect the peers for peer in node.p2ps: peer.peer_disconnect() peer.wait_for_disconnect() mocktime = int(time.time()) node.setmocktime(mocktime) slow_peer = ProofStoreP2PInterface() node.add_outbound_p2p_connection( slow_peer, p2p_idx=0, connection_type="avalanche", services=NODE_NETWORK | NODE_AVALANCHE, ) slow_peer.wait_until( lambda: slow_peer.last_message.get("getavaproofs")) slow_peer.nodeid = node.getpeerinfo()[-1]['id'] _ = request_proofs(slow_peer) # Elapse the timeout mocktime += AVALANCHE_AVAPROOFS_TIMEOUT + 1 node.setmocktime(mocktime) node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL) # Periodic compact proofs requests are sent in the same loop than the # cleanup, so when such a request is made we are sure the cleanup did # happen. 
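This is the mock-clock pattern used throughout these tests: setmocktime moves the node's wall clock past a timeout, mockscheduler fast-forwards the scheduler so the periodic task actually fires, and a wait_until predicate observes the side effect. A compressed sketch of that pattern, not a framework helper; `node`, `peer`, `timeout` and `interval` stand in for whatever the individual test uses:

    import time

    def fast_forward_past_timeout(node, peer, timeout, interval):
        # Sketch only: jump the node clock past `timeout`, force the scheduler
        # loop that owns both the cleanup and the periodic getavaproofs send,
        # then wait for the observable side effect on the peer.
        now = int(time.time()) + timeout + 1
        node.setmocktime(now)
        node.mockscheduler(interval)
        peer.wait_until(lambda: peer.message_count.get("getavaproofs", 0) > 1)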
slow_peer.wait_until( lambda: slow_peer.message_count.get("getavaproofs") > 1) req = msg_avaproofsreq() req.indices = range(numof_proof) slow_peer.send_and_ping(req) # Check we get no proof assert_equal(len(slow_peer.get_proofs()), 0) def test_compact_proofs_download_on_connect(self): self.log.info( "Check the node get compact proofs upon avalanche outbound discovery") requestee = self.nodes[0] requester = self.nodes[1] self.restart_node(0) numof_proof = 10 proofs = [gen_proof(self, requestee)[1] for _ in range(numof_proof)] for proof in proofs: requestee.sendavalancheproof(proof.serialize().hex()) proofids = get_proof_ids(requestee) assert all(proof.proofid in proofids for proof in proofs) # Start the requester and check it gets all the proofs self.start_node(1) self.connect_nodes(0, 1) self.wait_until( lambda: all( proof.proofid in proofids for proof in get_proof_ids(requester))) def test_no_compactproofs_during_ibs(self): self.log.info( "Check the node don't request compact proofs during IBD") node = self.nodes[0] chainwork = int(node.getblockchaininfo()['chainwork'], 16) self.restart_node( 0, extra_args=self.extra_args[0] + [f'-minimumchainwork={chainwork + 2:#x}']) assert node.getblockchaininfo()['initialblockdownload'] peer = P2PInterface() node.add_outbound_p2p_connection( peer, p2p_idx=0, connection_type="avalanche", services=NODE_NETWORK | NODE_AVALANCHE, ) # Force the node to process the sending loop peer.sync_send_with_ping() with p2p_lock: assert_equal(peer.message_count.get("getavaproofs", 0), 0) # Make sure there is no message sent as part as the periodic network # messaging either node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL) peer.sync_send_with_ping() with p2p_lock: assert_equal(peer.message_count.get("getavaproofs", 0), 0) def test_send_inbound_getavaproofs_until_quorum_is_established(self): self.log.info( "Check we also request the inbounds until the quorum is established") node = self.nodes[0] self.restart_node( 0, extra_args=self.extra_args[0] + ['-avaminquorumstake=1000000', '-avaminavaproofsnodecount=0']) assert_equal(node.getavalancheinfo()['ready_to_poll'], False) outbound = AvaP2PInterface() node.add_outbound_p2p_connection(outbound, p2p_idx=0) inbound = AvaP2PInterface() node.add_p2p_connection(inbound) inbound.nodeid = node.getpeerinfo()[-1]['id'] def count_getavaproofs(peers): with p2p_lock: return sum([peer.message_count.get("getavaproofs", 0) for peer in peers]) # Upon connection only the outbound gets a compact proofs message assert_equal(count_getavaproofs([inbound]), 0) self.wait_until(lambda: count_getavaproofs([outbound]) == 1) # Periodic send will include the inbound as well current_total = count_getavaproofs([inbound, outbound]) while count_getavaproofs([inbound]) == 0: node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL) self.wait_until(lambda: count_getavaproofs( [inbound, outbound]) > current_total) current_total = count_getavaproofs([inbound, outbound]) # Connect the minimum amount of stake and nodes for _ in range(8): node.add_p2p_connection(AvaP2PInterface(self, node)) self.wait_until(lambda: node.getavalancheinfo() ['ready_to_poll'] is True) # From now only the outbound is requested count_inbound = count_getavaproofs([inbound]) while count_getavaproofs([inbound, outbound]) < current_total + 20: node.mockscheduler(AVALANCHE_MAX_PERIODIC_NETWORKING_INTERVAL) inbound.sync_send_with_ping() outbound.sync_send_with_ping() assert_equal(count_getavaproofs([inbound]), count_inbound) def run_test(self): # Most if the tests only 
need a single node, let the other ones start # the node when required self.stop_node(1) self.test_send_outbound_getavaproofs() self.test_send_manual_getavaproofs() self.test_respond_getavaproofs() self.test_request_missing_proofs() self.test_send_missing_proofs() self.test_compact_proofs_download_on_connect() self.test_no_compactproofs_during_ibs() self.test_send_inbound_getavaproofs_until_quorum_is_established() if __name__ == '__main__': CompactProofsTest().main() diff --git a/test/functional/abc_p2p_proof_inventory.py b/test/functional/abc_p2p_proof_inventory.py index 95d419df5..5eba81af7 100644 --- a/test/functional/abc_p2p_proof_inventory.py +++ b/test/functional/abc_p2p_proof_inventory.py @@ -1,355 +1,355 @@ #!/usr/bin/env python3 # Copyright (c) 2021 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test proof inventory relaying """ import time from test_framework.address import ADDRESS_ECREG_UNSPENDABLE from test_framework.avatools import ( AvaP2PInterface, avalanche_proof_from_hex, gen_proof, get_proof_ids, wait_for_proof, ) from test_framework.messages import ( MSG_AVA_PROOF, MSG_TYPE_MASK, CInv, msg_avaproof, msg_getdata, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, uint256_hex, ) from test_framework.wallet_util import bytes_to_wif # Broadcast reattempt occurs every 10 to 15 minutes MAX_INITIAL_BROADCAST_DELAY = 15 * 60 # Delay to allow the node to respond to getdata requests UNCONDITIONAL_RELAY_DELAY = 2 * 60 class ProofInvStoreP2PInterface(P2PInterface): def __init__(self): super().__init__() self.proof_invs_counter = 0 self.last_proofid = None def on_inv(self, message): for i in message.inv: if i.type & MSG_TYPE_MASK == MSG_AVA_PROOF: self.proof_invs_counter += 1 self.last_proofid = i.hash class ProofInventoryTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 5 self.extra_args = [[ '-avaproofstakeutxodustthreshold=1000000', '-avaproofstakeutxoconfirmations=2', '-avacooldown=0', '-whitelist=noban@127.0.0.1', ]] * self.num_nodes def generate_proof(self, node, mature=True): privkey, proof = gen_proof(self, node) if mature: self.generate(node, 1, sync_fun=self.no_op) return privkey, proof def test_send_proof_inv(self): self.log.info("Test sending a proof to our peers") node = self.nodes[0] for _ in range(10): node.add_p2p_connection(ProofInvStoreP2PInterface()) _, proof = self.generate_proof(node) assert node.sendavalancheproof(proof.serialize().hex()) def proof_inv_found(peer): with p2p_lock: return peer.last_proofid == proof.proofid self.wait_until(lambda: all(proof_inv_found(i) for i in node.p2ps)) self.log.info("Test that we don't send the same inv several times") extra_peer = ProofInvStoreP2PInterface() node.add_p2p_connection(extra_peer) # Send the same proof one more time node.sendavalancheproof(proof.serialize().hex()) # Our new extra peer should receive it but not the others self.wait_until(lambda: proof_inv_found(extra_peer)) assert all(p.proof_invs_counter == 1 for p in node.p2ps) # Send the proof again and force the send loop to be processed for peer in node.p2ps: node.sendavalancheproof(proof.serialize().hex()) peer.sync_with_ping() assert all(p.proof_invs_counter == 1 for p in node.p2ps) def test_receive_proof(self): self.log.info("Test a peer is created on proof 
reception") node = self.nodes[0] _, proof = self.generate_proof(node) peer = node.add_p2p_connection(P2PInterface()) msg = msg_avaproof() msg.proof = proof peer.send_message(msg) self.wait_until(lambda: proof.proofid in get_proof_ids(node)) self.log.info("Test receiving a proof with an immature utxo") _, immature = self.generate_proof(node, mature=False) immature_proofid = uint256_hex(immature.proofid) msg = msg_avaproof() msg.proof = immature peer.send_message(msg) wait_for_proof(node, immature_proofid, expect_status="immature") def test_ban_invalid_proof(self): node = self.nodes[0] _, bad_proof = self.generate_proof(node) bad_proof.stakes = [] privkey = node.get_deterministic_priv_key().key missing_stake = node.buildavalancheproof( 1, 0, privkey, [{ 'txid': '0' * 64, 'vout': 0, 'amount': 10000000, 'height': 42, 'iscoinbase': False, 'privatekey': privkey, }] ) self.restart_node( 0, ['-avaproofstakeutxodustthreshold=1000000']) peer = node.add_p2p_connection(P2PInterface()) msg = msg_avaproof() # Sending a proof with a missing utxo doesn't trigger a ban msg.proof = avalanche_proof_from_hex(missing_stake) with node.assert_debug_log(["received: avaproof"], ["Misbehaving"]): peer.send_message(msg) peer.sync_with_ping() msg.proof = bad_proof with node.assert_debug_log([ 'Misbehaving', 'invalid-proof', ]): peer.send_message(msg) peer.wait_for_disconnect() def test_proof_relay(self): # This test makes no sense with less than 2 nodes ! assert_greater_than(self.num_nodes, 2) proofs_keys = [self.generate_proof(self.nodes[0]) for _ in self.nodes] - proofids = set([proof_key[1].proofid for proof_key in proofs_keys]) + proofids = {proof_key[1].proofid for proof_key in proofs_keys} # generate_proof does not sync, so do it manually self.sync_blocks() def restart_nodes_with_proof(nodes, extra_args=None): for node in nodes: privkey, proof = proofs_keys[node.index] self.restart_node(node.index, self.extra_args[node.index] + [ f"-avaproof={proof.serialize().hex()}", f"-avamasterkey={bytes_to_wif(privkey.get_bytes())}" ] + (extra_args or [])) restart_nodes_with_proof(self.nodes[:-1]) chainwork = int(self.nodes[-1].getblockchaininfo()['chainwork'], 16) restart_nodes_with_proof( self.nodes[-1:], extra_args=[f'-minimumchainwork={chainwork + 100:#x}']) # Add an inbound so the node proof can be registered and advertised [node.add_p2p_connection(P2PInterface()) for node in self.nodes] [[self.connect_nodes(node.index, j) for j in range(node.index)] for node in self.nodes] # Connect a block to make the proofs added to our pool self.generate(self.nodes[0], 1, sync_fun=self.sync_blocks) self.log.info("Nodes should eventually get the proof from their peer") self.sync_proofs(self.nodes[:-1]) for node in self.nodes[:-1]: assert_equal(set(get_proof_ids(node)), proofids) assert self.nodes[-1].getblockchaininfo()['initialblockdownload'] self.log.info("Except the node that has not completed IBD") assert_equal(len(get_proof_ids(self.nodes[-1])), 1) # The same if we send a proof directly with no download request peer = AvaP2PInterface() self.nodes[-1].add_p2p_connection(peer) _, proof = self.generate_proof(self.nodes[0]) peer.send_avaproof(proof) peer.sync_send_with_ping() with p2p_lock: assert_equal(peer.message_count.get('getdata', 0), 0) # Leave the nodes in good shape for the next tests restart_nodes_with_proof(self.nodes) [[self.connect_nodes(node.index, j) for j in range(node.index)] for node in self.nodes] def test_manually_sent_proof(self): node0 = self.nodes[0] _, proof = self.generate_proof(node0) self.log.info( "Send 
a proof via RPC and check all the nodes download it") node0.sendavalancheproof(proof.serialize().hex()) self.sync_proofs() def test_unbroadcast(self): self.log.info("Test broadcasting proofs") node = self.nodes[0] # Disconnect the other nodes/peers, or they will request the proof and # invalidate the test [n.stop_node() for n in self.nodes[1:]] node.disconnect_p2ps() def add_peers(count): peers = [] for i in range(count): peer = node.add_p2p_connection(ProofInvStoreP2PInterface()) peer.wait_for_verack() peers.append(peer) return peers _, proof = self.generate_proof(node) proofid_hex = uint256_hex(proof.proofid) # Broadcast the proof peers = add_peers(3) assert node.sendavalancheproof(proof.serialize().hex()) wait_for_proof(node, proofid_hex) def proof_inv_received(peers): with p2p_lock: return all(p.last_message.get( "inv") and p.last_message["inv"].inv[-1].hash == proof.proofid for p in peers) self.wait_until(lambda: proof_inv_received(peers)) # If no peer request the proof for download, the node should reattempt # broadcasting to all new peers after 10 to 15 minutes. peers = add_peers(3) node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY + 1) peers[-1].sync_with_ping() self.wait_until(lambda: proof_inv_received(peers)) # If at least one peer requests the proof, there is no more attempt to # broadcast it node.setmocktime(int(time.time()) + UNCONDITIONAL_RELAY_DELAY) msg = msg_getdata([CInv(t=MSG_AVA_PROOF, h=proof.proofid)]) peers[-1].send_message(msg) # Give enough time for the node to broadcast the proof again peers = add_peers(3) node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY + 1) peers[-1].sync_with_ping() assert not proof_inv_received(peers) self.log.info( "Proofs that become invalid should no longer be broadcasted") # Restart and add connect a new set of peers self.restart_node(0) # Broadcast the proof peers = add_peers(3) assert node.sendavalancheproof(proof.serialize().hex()) self.wait_until(lambda: proof_inv_received(peers)) # Sanity check our node knows the proof, and it is valid wait_for_proof(node, proofid_hex) # Mature the utxo then spend it self.generate(node, 100, sync_fun=self.no_op) utxo = proof.stakes[0].stake.utxo raw_tx = node.createrawtransaction( inputs=[{ # coinbase "txid": uint256_hex(utxo.hash), "vout": utxo.n }], outputs={ADDRESS_ECREG_UNSPENDABLE: 25_000_000 - 250.00}, ) signed_tx = node.signrawtransactionwithkey( hexstring=raw_tx, privkeys=[node.get_deterministic_priv_key().key], ) node.sendrawtransaction(signed_tx['hex']) # Mine the tx in a block self.generate(node, 1, sync_fun=self.no_op) # Wait for the proof to be invalidated def check_proof_not_found(proofid): try: assert_raises_rpc_error(-8, "Proof not found", node.getrawavalancheproof, proofid) return True except BaseException: return False self.wait_until(lambda: check_proof_not_found(proofid_hex)) # It should no longer be broadcasted peers = add_peers(3) node.mockscheduler(MAX_INITIAL_BROADCAST_DELAY + 1) peers[-1].sync_with_ping() assert not proof_inv_received(peers) def run_test(self): self.test_send_proof_inv() self.test_receive_proof() self.test_proof_relay() self.test_manually_sent_proof() # Run these tests last because they need to disconnect the nodes self.test_unbroadcast() self.test_ban_invalid_proof() if __name__ == '__main__': ProofInventoryTest().main() diff --git a/test/functional/abc_rpc_getavalanchepeerinfo.py b/test/functional/abc_rpc_getavalanchepeerinfo.py index 7371c975b..eb6e980e7 100755 --- a/test/functional/abc_rpc_getavalanchepeerinfo.py +++ 
b/test/functional/abc_rpc_getavalanchepeerinfo.py @@ -1,240 +1,239 @@ #!/usr/bin/env python3 # Copyright (c) 2020-2021 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the getavalanchepeerinfo RPC.""" from random import choice from test_framework.avatools import ( AvaP2PInterface, avalanche_proof_from_hex, create_coinbase_stakes, gen_proof, get_ava_p2p_interface_no_handshake, ) from test_framework.key import ECKey from test_framework.messages import ( NODE_AVALANCHE, NODE_NETWORK, AvalancheVote, AvalancheVoteError, ) from test_framework.p2p import p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, uint256_hex, ) from test_framework.wallet_util import bytes_to_wif # The interval between avalanche statistics computation AVALANCHE_STATISTICS_INTERVAL = 10 * 60 class MutedAvaP2PInterface(AvaP2PInterface): def __init__(self, test_framework=None, node=None): super().__init__(test_framework, node) self.is_responding = False self.privkey = None self.addr = None self.poll_received = 0 def on_avapoll(self, message): self.poll_received += 1 class AllYesAvaP2PInterface(MutedAvaP2PInterface): def __init__(self, test_framework=None, node=None): super().__init__(test_framework, node) self.is_responding = True def on_avapoll(self, message): self.send_avaresponse( message.poll.round, [ AvalancheVote( AvalancheVoteError.ACCEPTED, inv.hash) for inv in message.poll.invs], self.master_privkey if self.delegation is None else self.delegated_privkey) super().on_avapoll(message) class GetAvalanchePeerInfoTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [['-avaproofstakeutxodustthreshold=1000000', '-avaproofstakeutxoconfirmations=1', '-avacooldown=0']] def test_proofs_and_nodecounts(self): node = self.nodes[0] peercount = 5 nodecount = 10 self.log.info( f"Generating {peercount} peers with {nodecount} nodes each") addrkey0 = node.get_deterministic_priv_key() blockhashes = self.generatetoaddress( node, peercount, addrkey0.address, sync_fun=self.no_op) # Use the first coinbase to create a stake stakes = create_coinbase_stakes(node, blockhashes, addrkey0.key) def getProof(stake): privkey = ECKey() privkey.generate() pubkey = privkey.get_pubkey() proof_sequence = 11 proof_expiration = 0 proof = node.buildavalancheproof( proof_sequence, proof_expiration, bytes_to_wif( privkey.get_bytes()), [stake]) return (pubkey.get_bytes().hex(), proof) # Create peercount * nodecount node array nodes = [[get_ava_p2p_interface_no_handshake(node) for _ in range( nodecount)] for _ in range(peercount)] # Add peercount peers and bind all the nodes to each proofs = [] for i in range(peercount): pubkey_hex, proof = getProof(stakes[i]) proofs.append(proof) [node.addavalanchenode(n.nodeid, pubkey_hex, proof) for n in nodes[i]] self.log.info("Testing getavalanchepeerinfo...") avapeerinfo = node.getavalanchepeerinfo() assert_equal(len(avapeerinfo), peercount) for i, peer in enumerate(avapeerinfo): proofid_hex = uint256_hex( avalanche_proof_from_hex( proofs[i]).proofid) assert_equal(peer["avalanche_peerid"], i) assert_equal(peer["availability_score"], 0.0) assert_equal(peer["proofid"], proofid_hex) assert_equal(peer["proof"], proofs[i]) assert_equal(peer["nodecount"], nodecount) - assert_equal(set(peer["node_list"]), set( - [n.nodeid for n in 
nodes[i]])) + assert_equal(set(peer["node_list"]), {n.nodeid for n in nodes[i]}) self.log.info("Testing with a specified proofid") assert_raises_rpc_error(-8, "Proofid not found", node.getavalanchepeerinfo, proofid="0" * 64) target_proof = choice(proofs) target_proofid = avalanche_proof_from_hex(target_proof).proofid avapeerinfo = node.getavalanchepeerinfo( proofid=uint256_hex(target_proofid)) assert_equal(len(avapeerinfo), 1) assert_equal(avapeerinfo[0]["proof"], target_proof) def test_peer_availability_scores(self): self.restart_node(0, extra_args=self.extra_args[0] + [ '-avaminquorumstake=0', '-avaminavaproofsnodecount=0', ]) node = self.nodes[0] # Setup node interfaces, some responsive and some not avanodes = [ # First peer has all responsive nodes AllYesAvaP2PInterface(), AllYesAvaP2PInterface(), AllYesAvaP2PInterface(), # Next peer has only one responsive node MutedAvaP2PInterface(), MutedAvaP2PInterface(), AllYesAvaP2PInterface(), # Last peer has no responsive nodes MutedAvaP2PInterface(), MutedAvaP2PInterface(), MutedAvaP2PInterface(), ] # Create some proofs and associate the nodes with them avaproofids = [] p2p_idx = 0 num_proof = 3 num_avanode = 3 for p in range(num_proof): master_privkey, proof = gen_proof(self, node) avaproofids.append(uint256_hex(proof.proofid)) for n in range(num_avanode): avanode = avanodes[p * num_proof + n] avanode.master_privkey = master_privkey avanode.proof = proof node.add_outbound_p2p_connection( avanode, p2p_idx=p2p_idx, connection_type="avalanche", services=NODE_NETWORK | NODE_AVALANCHE) p2p_idx += 1 assert_equal(len(avanodes), num_proof * num_avanode) def all_nodes_connected(): avapeers = node.getavalanchepeerinfo() if len(avapeers) != num_proof: return False for avapeer in avapeers: if avapeer['nodecount'] != num_avanode: return False return True self.wait_until(all_nodes_connected) # Force the availability score to diverge between the responding and the # muted nodes. self.generate(node, 1, sync_fun=self.no_op) def poll_all_for_block(): with p2p_lock: return all([avanode.poll_received > ( 10 if avanode.is_responding else 0) for avanode in avanodes]) self.wait_until(poll_all_for_block) # Move the scheduler forward so that so that our peers get availability # scores computed. node.mockscheduler(AVALANCHE_STATISTICS_INTERVAL) def check_availability_scores(): peerinfos = node.getavalanchepeerinfo() # Get availability scores for each peer scores = {} for peerinfo in peerinfos: p = avaproofids.index(peerinfo['proofid']) scores[p] = peerinfo['availability_score'] # Wait until scores have been computed if scores[p] == 0.0: return False # Even though the first peer has more responsive nodes than the second # peer, they will both have "good" scores because overall both peers are # responsive to polls. But the first peer's score won't necessarily be # higher than the second. 
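The sign convention the assertions below rely on is that peers whose nodes keep answering polls accumulate a positive availability score, while peers whose nodes stay silent drift negative. Purely as an illustration of that shape (this is not the node's actual computation, just a toy decaying average):

    def toy_availability_score(score, responded, decay=0.9):
        # Toy illustration only, not the node's formula: a decaying average
        # that trends positive while polls are answered and negative while
        # they are not.
        return decay * score + (1.0 - decay) * (1.0 if responded else -1.0)

    score = 0.0
    for _ in range(50):
        score = toy_availability_score(score, responded=False)
    assert score < 0.0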
assert_greater_than(scores[0], 5) assert_greater_than(scores[1], 5) # Last peer should have negative score since it is unresponsive assert_greater_than(0.0, scores[2]) return True self.wait_until(check_availability_scores) def run_test(self): self.test_proofs_and_nodecounts() self.test_peer_availability_scores() if __name__ == '__main__': GetAvalanchePeerInfoTest().main() diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 68325f89c..31f8aeb8c 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -1,227 +1,226 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """An example functional test The module-level docstring should include a high-level description of what the test is doing. It's the first thing people see when they open the file and should give the reader information about *what* the test is testing and *how* it's being tested """ # Imports should be in PEP8 ordering (std library first, then third party # libraries then local imports). from collections import defaultdict # Avoid wildcard * imports if possible from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import MSG_BLOCK, CInv, msg_block, msg_getdata from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal # P2PInterface is a class containing callbacks to be executed when a P2P # message is received from the node-under-test. Subclass P2PInterface and # override the on_*() methods if you need custom behaviour. class BaseNode(P2PInterface): def __init__(self): """Initialize the P2PInterface Used to initialize custom properties for the Node that aren't included by default in the base class. Be aware that the P2PInterface base class already stores a counter for each P2P message type and the last received message of each type, which should be sufficient for the needs of most tests. Call super().__init__() first for standard initialization and then initialize custom properties.""" super().__init__() # Stores a dictionary of all blocks received self.block_receive_map = defaultdict(int) def on_block(self, message): """Override the standard on_block callback Store the hash of a received block in the dictionary.""" message.block.calc_sha256() self.block_receive_map[message.block.sha256] += 1 def on_inv(self, message): """Override the standard on_inv callback""" pass def custom_function(): """Do some custom behaviour If this function is more generally useful for other tests, consider moving it to a module in test_framework.""" # self.log.info("running custom_function") # Oops! Can't run self.log # outside the BitcoinTestFramework pass class ExampleTest(BitcoinTestFramework): # Each functional test is a subclass of the BitcoinTestFramework class. # Override the set_test_params(), skip_test_if_missing_module(), add_options(), setup_chain(), setup_network() # and setup_nodes() methods to customize the test setup as required. def set_test_params(self): """Override test parameters for your individual test. This method must be overridden and num_nodes must be explicitly set.""" # By default every test loads a pre-mined chain of 200 blocks from cache. # Set setup_clean_chain to True to skip this and start from the Genesis # block. 
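The BaseNode docstring above describes the extension point this whole section leans on: subclass P2PInterface, override an on_*() callback to record what you care about on the network thread, and read that state from the test thread only under p2p_lock, the same pattern as ProofStoreP2PInterface and ProofInvStoreP2PInterface earlier in this diff. A minimal sketch of that shape; the class name is hypothetical and the imports assume the usual test_framework layout:

    from test_framework.p2p import P2PInterface, p2p_lock

    class InvCountingNode(P2PInterface):
        # Hypothetical example, sketch only: count inv items on the network
        # thread, read the counter from the test thread under p2p_lock.
        def __init__(self):
            super().__init__()
            self.inv_count = 0

        def on_inv(self, message):
            # Called on the network thread for every received inv message.
            self.inv_count += len(message.inv)

        def get_inv_count(self):
            with p2p_lock:
                return self.inv_count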
self.setup_clean_chain = True self.num_nodes = 3 # Use self.extra_args to change command-line arguments for the nodes self.extra_args = [[], ["-logips"], []] # self.log.info("I've finished set_test_params") # Oops! Can't run # self.log before run_test() # Use skip_test_if_missing_module() to skip the test if your test requires certain modules to be present. # This test uses generate which requires wallet to be compiled def skip_test_if_missing_module(self): self.skip_if_no_wallet() # Use add_options() to add specific command-line options for your test. # In practice this is not used very much, since the tests are mostly written # to be run in automated environments without command-line options. # def add_options() # pass # Use setup_chain() to customize the node data directories. In practice # this is not used very much since the default behaviour is almost always # fine # def setup_chain(): # pass def setup_network(self): """Setup the test network topology Often you won't need to override this, since the standard network topology (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests. If you do override this method, remember to start the nodes, assign them to self.nodes, connect them and then sync.""" self.setup_nodes() # In this test, we're not connecting node2 to node0 or node1. Calls to # sync_all() should not include node2, since we're not expecting it to # sync. self.connect_nodes(0, 1) self.sync_all(self.nodes[0:2]) # Use setup_nodes() to customize the node start behaviour (for example if # you don't want to start all nodes at the start of the test). # def setup_nodes(): # pass def custom_method(self): """Do some custom behaviour for this test Define it in a method here because you're going to use it repeatedly. If you think it's useful in general, consider moving it to the base BitcoinTestFramework class so other tests can use it.""" self.log.info("Running custom_method") def run_test(self): """Main test logic""" # Create P2P connections will wait for a verack to make sure the # connection is fully up peer_messaging = self.nodes[0].add_p2p_connection(BaseNode()) # Generating a block on one of the nodes will get us out of IBD blocks = [int(self.generate(self.nodes[0], sync_fun=lambda: self.sync_all( self.nodes[0:2]), nblocks=1)[0], 16)] # Notice above how we called an RPC by calling a method with the same # name on the node object. Notice also how we used a keyword argument # to specify a named RPC argument. Neither of those are defined on the # node object. Instead there's some __getattr__() magic going on under # the covers to dispatch unrecognised attribute calls to the RPC # interface. # Logs are nice. Do plenty of them. They can be used in place of comments for # breaking the test into sub-sections. self.log.info("Starting test!") self.log.info("Calling a custom function") custom_function() self.log.info("Calling a custom method") self.custom_method() self.log.info("Create some blocks") self.tip = int(self.nodes[0].getbestblockhash(), 16) self.block_time = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['time'] + 1 height = self.nodes[0].getblockcount() for _ in range(10): # Use the blocktools functionality to manually build a block. # Calling the generate() rpc is easier, but this allows us to exactly # control the blocks and transactions. 
block = create_block( self.tip, create_coinbase( height + 1), self.block_time) block.solve() block_message = msg_block(block) # Send message is used to send a P2P message to the node over our # P2PInterface peer_messaging.send_message(block_message) self.tip = block.sha256 blocks.append(self.tip) self.block_time += 1 height += 1 self.log.info( "Wait for node1 to reach current tip (height 11) using RPC") self.nodes[1].waitforblockheight(11) self.log.info("Connect node2 and node1") self.connect_nodes(1, 2) self.log.info("Wait for node2 to receive all the blocks from node1") self.sync_all() self.log.info("Add P2P connection to node2") self.nodes[0].disconnect_p2ps() peer_receiving = self.nodes[2].add_p2p_connection(BaseNode()) self.log.info("Test that node2 propagates all the blocks to us") getdata_request = msg_getdata() for block in blocks: getdata_request.inv.append(CInv(MSG_BLOCK, block)) peer_receiving.send_message(getdata_request) # wait_until() will loop until a predicate condition is met. Use it to test properties of the # P2PInterface objects. peer_receiving.wait_until( - lambda: sorted(blocks) == sorted(list( - peer_receiving.block_receive_map.keys())), + lambda: sorted(blocks) == sorted(peer_receiving.block_receive_map.keys()), timeout=5) self.log.info("Check that each block was received only once") # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking # and synchronization issues. Note p2p.wait_until() acquires this # global lock internally when testing the predicate. with p2p_lock: for block in peer_receiving.block_receive_map.values(): assert_equal(block, 1) if __name__ == '__main__': ExampleTest().main() diff --git a/test/functional/feature_bind_extra.py b/test/functional/feature_bind_extra.py index cb3f1fb0a..ceb72a889 100755 --- a/test/functional/feature_bind_extra.py +++ b/test/functional/feature_bind_extra.py @@ -1,93 +1,93 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test starting bitcoind with -bind and/or -bind=...=onion and confirm that bind happens on the expected ports. """ import sys from test_framework.netutil import addr_to_hex, get_bind_addrs from test_framework.test_framework import BitcoinTestFramework, SkipTest from test_framework.util import PORT_MIN, PORT_RANGE, assert_equal, rpc_port class BindExtraTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True # Avoid any -bind= on the command line. Force the framework to avoid # adding -bind=127.0.0.1. self.bind_to_localhost_only = False self.num_nodes = 2 def setup_network(self): # Override setup_network() because we want to put the result of # p2p_port() in self.extra_args[], before the nodes are started. # p2p_port() is not usable in set_test_params() because PortSeed.n is # not set at that time. # Due to OS-specific network stats queries, we only run on Linux. self.log.info("Checking for Linux") if not sys.platform.startswith('linux'): raise SkipTest("This test can only be run on Linux.") loopback_ipv4 = addr_to_hex("127.0.0.1") # Start custom ports after p2p and rpc ports. port = PORT_MIN + 2 * PORT_RANGE # Array of tuples [command line arguments, expected bind addresses]. self.expected = [] # Node0, no normal -bind=... 
with -bind=...=onion, thus only the tor # target. self.expected.append( [ [f"-bind=127.0.0.1:{port}=onion"], [(loopback_ipv4, port)] ], ) port += 1 # Node1, both -bind=... and -bind=...=onion. self.expected.append( [ [f"-bind=127.0.0.1:{port}", f"-bind=127.0.0.1:{port + 1}=onion"], [(loopback_ipv4, port), (loopback_ipv4, port + 1)] ], ) port += 2 - self.extra_args = list(map(lambda e: e[0], self.expected)) + self.extra_args = [e[0] for e in self.expected] self.add_nodes(self.num_nodes, self.extra_args) # Don't start the nodes, as some of them would collide trying to bind # on the same port. def run_test(self): for i in range(len(self.expected)): self.log.info(f"Starting node {i} with {self.expected[i][0]}") self.start_node(i) pid = self.nodes[i].process.pid binds = set(get_bind_addrs(pid)) # Remove IPv6 addresses because on some CI environments "::1" is not configured # on the system (so our test_ipv6_local() would return False), but it is # possible to bind on "::". This makes it unpredictable whether to expect # that bitcoind has bound on "::1" (for RPC) and "::" (for P2P). ipv6_addr_len_bytes = 32 binds = set( filter( lambda e: len( e[0]) != ipv6_addr_len_bytes, binds)) # Remove RPC ports. They are not relevant for this test. binds = set(filter(lambda e: e[1] != rpc_port(i), binds)) assert_equal(binds, set(self.expected[i][1])) self.stop_node(i) self.log.info(f"Stopped node {i}") if __name__ == '__main__': BindExtraTest().main() diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py index d25fe0894..532e56391 100755 --- a/test/functional/feature_notifications.py +++ b/test/functional/feature_notifications.py @@ -1,204 +1,204 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Copyright (c) 2018 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test the -alertnotify, -blocknotify and -walletnotify options.""" import os from test_framework.address import ADDRESS_ECREG_UNSPENDABLE, keyhash_to_p2pkh from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal FORK_WARNING_MESSAGE = "Warning: Large-work fork detected, forking after block {}" # Linux allow all characters other than \x00 # Windows disallow control characters (0-31) and /\?%:|"<> FILE_CHAR_START = 32 if os.name == 'nt' else 1 FILE_CHAR_END = 128 FILE_CHAR_BLACKLIST = '/\\?%*:|"<>' if os.name == 'nt' else '/' def notify_outputname(walletname, txid): return txid if os.name == 'nt' else f'{walletname}_{txid}' class NotificationsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def setup_network(self): self.wallet = ''.join( chr(i) for i in range( FILE_CHAR_START, FILE_CHAR_END) if chr(i) not in FILE_CHAR_BLACKLIST) self.alertnotify_dir = os.path.join(self.options.tmpdir, "alertnotify") self.blocknotify_dir = os.path.join(self.options.tmpdir, "blocknotify") self.walletnotify_dir = os.path.join( self.options.tmpdir, "walletnotify") os.mkdir(self.alertnotify_dir) os.mkdir(self.blocknotify_dir) os.mkdir(self.walletnotify_dir) # -alertnotify and -blocknotify on node0, walletnotify on node1 self.extra_args = [ [f"-alertnotify=echo > {os.path.join(self.alertnotify_dir, '%s')}", f"-blocknotify=echo > {os.path.join(self.blocknotify_dir, '%s')}"], ["-rescan", f"-walletnotify=echo > {os.path.join(self.walletnotify_dir, notify_outputname('%w', '%s'))}"]] self.wallet_names = [self.default_wallet_name, self.wallet] super().setup_network() def run_test(self): self.log.info("test -blocknotify") block_count = 10 blocks = self.generatetoaddress(self.nodes[1], block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_ECREG_UNSPENDABLE ) # wait at most 10 seconds for expected number of files before reading # the content self.wait_until( lambda: len(os.listdir(self.blocknotify_dir)) == block_count, timeout=10) # directory content should equal the generated blocks hashes assert_equal(sorted(blocks), sorted(os.listdir(self.blocknotify_dir))) if self.is_wallet_compiled(): self.log.info("test -walletnotify") # wait at most 10 seconds for expected number of files before # reading the content self.wait_until( lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10) # directory content should equal the generated transaction hashes - txids_rpc = list(map(lambda t: notify_outputname( - self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count))) + txids_rpc = [notify_outputname(self.wallet, t['txid']) + for t in self.nodes[1].listtransactions("*", block_count)] assert_equal( sorted(txids_rpc), sorted( os.listdir( self.walletnotify_dir))) self.stop_node(1) for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) self.log.info("test -walletnotify after rescan") # restart node to rescan to force wallet notifications self.start_node(1) self.connect_nodes(0, 1) self.wait_until( lambda: len(os.listdir(self.walletnotify_dir)) == block_count, timeout=10) # directory content should equal the generated transaction hashes - txids_rpc = list(map(lambda t: notify_outputname( - self.wallet, t['txid']), self.nodes[1].listtransactions("*", block_count))) + txids_rpc = [notify_outputname(self.wallet, t['txid']) + for t in self.nodes[1].listtransactions("*", block_count)] assert_equal( sorted(txids_rpc), 
sorted( os.listdir( self.walletnotify_dir))) for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) # Conflicting transactions tests. Give node 0 same wallet seed as # node 1, generate spends from node 0, and check notifications # triggered by node 1 self.log.info("test -walletnotify with conflicting transactions") self.nodes[0].sethdseed( seed=self.nodes[1].dumpprivkey( keyhash_to_p2pkh( bytes.fromhex( self.nodes[1].getwalletinfo()['hdseedid'])[::-1]))) self.nodes[0].rescanblockchain() self.generatetoaddress( self.nodes[0], 100, ADDRESS_ECREG_UNSPENDABLE) # Generate transaction on node 0, sync mempools, and check for # notification on node 1. tx1 = self.nodes[0].sendtoaddress( address=ADDRESS_ECREG_UNSPENDABLE, amount=100) assert_equal(tx1 in self.nodes[0].getrawmempool(), True) self.sync_mempools() self.expect_wallet_notify([tx1]) # Add tx1 transaction to new block, checking for a notification # and the correct number of confirmations. self.generatetoaddress(self.nodes[0], 1, ADDRESS_ECREG_UNSPENDABLE) self.sync_blocks() self.expect_wallet_notify([tx1]) assert_equal(self.nodes[1].gettransaction(tx1)["confirmations"], 1) # Generate conflicting transactions with the nodes disconnected. # Sending almost the entire available balance on each node, but # with a slightly different amount, ensures that there will be # a conflict. balance = self.nodes[0].getbalance() self.disconnect_nodes(0, 1) tx2_node0 = self.nodes[0].sendtoaddress( address=ADDRESS_ECREG_UNSPENDABLE, amount=balance - 20) tx2_node1 = self.nodes[1].sendtoaddress( address=ADDRESS_ECREG_UNSPENDABLE, amount=balance - 21) assert tx2_node0 != tx2_node1 self.expect_wallet_notify([tx2_node1]) # So far tx2_node1 has no conflicting tx assert not self.nodes[1].gettransaction( tx2_node1)['walletconflicts'] # Mine a block on node0, reconnect the nodes, check that tx2_node1 # has a conflicting tx after syncing with node0. self.generatetoaddress( self.nodes[0], 1, ADDRESS_ECREG_UNSPENDABLE, sync_fun=self.no_op) self.connect_nodes(0, 1) self.sync_blocks() assert tx2_node0 in self.nodes[1].gettransaction(tx2_node1)[ 'walletconflicts'] # node1's wallet will notify of the new confirmed transaction tx2_0 # and about the conflicted transaction tx2_1. self.expect_wallet_notify([tx2_node0, tx2_node1]) # Create an invalid chain and ensure the node warns. self.log.info("test -alertnotify for forked chain") fork_block = self.nodes[0].getbestblockhash() self.generatetoaddress(self.nodes[0], 1, ADDRESS_ECREG_UNSPENDABLE) invalid_block = self.nodes[0].getbestblockhash() self.generatetoaddress(self.nodes[0], 7, ADDRESS_ECREG_UNSPENDABLE) # Invalidate a large branch, which should trigger an alert. self.nodes[0].invalidateblock(invalid_block) # Give bitcoind 10 seconds to write the alert notification self.wait_until(lambda: len(os.listdir(self.alertnotify_dir)), timeout=10) # The notification command is unable to properly handle the spaces on # windows. Skip the content check in this case. 
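The txids_rpc hunks above are representative of the mechanical rewrite running through this whole diff: list(map(lambda ...)) becomes a list comprehension and set([...]) or set(... for ...) becomes a set comprehension. The spellings are equivalent in these cases; the comprehension simply drops the intermediate callable or list. A standalone illustration with toy data, not the test's values:

    txids = [{"txid": "aa"}, {"txid": "bb"}]  # toy data

    # map(lambda ...) vs. list comprehension: same result, no intermediate lambda
    assert list(map(lambda t: f"w_{t['txid']}", txids)) == [f"w_{t['txid']}" for t in txids]

    # set([...]) vs. set comprehension: same result, no intermediate list
    assert set([t["txid"] for t in txids]) == {t["txid"] for t in txids}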
if os.name != 'nt': assert FORK_WARNING_MESSAGE.format( fork_block) in os.listdir(self.alertnotify_dir) for notify_file in os.listdir(self.alertnotify_dir): os.remove(os.path.join(self.alertnotify_dir, notify_file)) def expect_wallet_notify(self, tx_ids): self.wait_until( lambda: len(os.listdir(self.walletnotify_dir)) >= len(tx_ids), timeout=10) assert_equal( sorted(notify_outputname(self.wallet, tx_id) for tx_id in tx_ids), sorted(os.listdir(self.walletnotify_dir))) for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) if __name__ == '__main__': NotificationsTest().main() diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index 539a908c8..bf8b9ddf6 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -1,287 +1,287 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test bitcoind with different proxy configuration. Test plan: - Start bitcoind's with different proxy configurations - Use addnode to initiate connections - Verify that proxies are connected to, and the right connection command is given - Proxy configurations to test on bitcoind side: - `-proxy` (proxy everything) - `-onion` (proxy just onions) - `-proxyrandomize` Circuit randomization - Proxy configurations to test on proxy side, - support no authentication (other proxy) - support no authentication + user/pass authentication (Tor) - proxy on IPv6 - Create various proxies (as threads) - Create nodes that connect to them - Manipulate the peer connections using addnode (onetry) and observe effects - Test the getpeerinfo `network` field for the peer addnode connect to IPv4 addnode connect to IPv6 addnode connect to onion addnode connect to generic DNS name - Test getnetworkinfo for each node """ import os import socket from test_framework.netutil import test_ipv6_local from test_framework.socks5 import ( AddressType, Socks5Command, Socks5Configuration, Socks5Server, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import PORT_MIN, PORT_RANGE, assert_equal RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports # Networks returned by RPC getpeerinfo. NET_UNROUTABLE = "not_publicly_routable" NET_IPV4 = "ipv4" NET_IPV6 = "ipv6" NET_ONION = "onion" NET_I2P = "i2p" # Networks returned by RPC getnetworkinfo, defined in # src/rpc/net.cpp::GetNetworksInfo() NETWORKS = frozenset({NET_IPV4, NET_IPV6, NET_ONION, NET_I2P}) class ProxyTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True def setup_nodes(self): self.have_ipv6 = test_ipv6_local() # Create two proxies on different ports # ... one unauthenticated self.conf1 = Socks5Configuration() self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000)) self.conf1.unauth = True self.conf1.auth = False # ... one supporting authenticated and unauthenticated (Tor) self.conf2 = Socks5Configuration() self.conf2.addr = ( '127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000)) self.conf2.unauth = True self.conf2.auth = True if self.have_ipv6: # ... 
one on IPv6 with similar configuration self.conf3 = Socks5Configuration() self.conf3.af = socket.AF_INET6 self.conf3.addr = ( '::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) self.conf3.unauth = True self.conf3.auth = True else: self.log.warning("Testing without local IPv6 support") self.serv1 = Socks5Server(self.conf1) self.serv1.start() self.serv2 = Socks5Server(self.conf2) self.serv2.start() if self.have_ipv6: self.serv3 = Socks5Server(self.conf3) self.serv3.start() # We will not try to connect to this. self.i2p_sam = ('127.0.0.1', 7656) # Note: proxies are not used to connect to local nodes. # This is because the proxy to use is based on CService.GetNetwork(), # which returns NET_UNROUTABLE for localhost. ip1, port1 = self.conf1.addr ip2, port2 = self.conf2.addr args = [ ['-listen', f'-proxy={ip1}:{port1}', '-proxyrandomize=1'], ['-listen', f'-proxy={ip1}:{port1}', f'-onion={ip2}:{port2}', f'-i2psam={self.i2p_sam[0]}:{self.i2p_sam[1]}', '-i2pacceptincoming=0', '-proxyrandomize=0'], ['-listen', f'-proxy={ip2}:{port2}', '-proxyrandomize=1'], [] ] if self.have_ipv6: args[3] = [ '-listen', f'-proxy=[{self.conf3.addr[0]}]:{self.conf3.addr[1]}', '-proxyrandomize=0', '-noonion'] self.add_nodes(self.num_nodes, extra_args=args) self.start_nodes() def network_test(self, node, addr, network): for peer in node.getpeerinfo(): if peer["addr"] == addr: assert_equal(peer["network"], network) def node_test(self, node, proxies, auth, test_onion=True): rv = [] addr = "15.61.23.23:1234" self.log.debug( f"Test: outgoing IPv4 connection through node for address {addr}") node.addnode(addr, "onetry") cmd = proxies[0].queue.get() assert isinstance(cmd, Socks5Command) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, # even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"15.61.23.23") assert_equal(cmd.port, 1234) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) self.network_test(node, addr, network=NET_IPV4) if self.have_ipv6: addr = "[1233:3432:2434:2343:3234:2345:6546:4534]:5443" self.log.debug( f"Test: outgoing IPv6 connection through node for address {addr}") node.addnode(addr, "onetry") cmd = proxies[1].queue.get() assert isinstance(cmd, Socks5Command) # Note: bitcoind's SOCKS5 implementation only sends atyp # DOMAINNAME, even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534") assert_equal(cmd.port, 5443) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) self.network_test(node, addr, network=NET_IPV6) if test_onion: addr = "bitcoinostk4e4re.onion:8333" self.log.debug( f"Test: outgoing onion connection through node for address {addr}") node.addnode(addr, "onetry") cmd = proxies[2].queue.get() assert isinstance(cmd, Socks5Command) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"bitcoinostk4e4re.onion") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) self.network_test(node, addr, network=NET_ONION) addr = "node.noumenon:8333" self.log.debug( f"Test: outgoing DNS name connection through node for address {addr}") node.addnode(addr, "onetry") cmd = proxies[3].queue.get() assert isinstance(cmd, Socks5Command) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"node.noumenon") assert_equal(cmd.port, 8333) if not auth: 
assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) self.network_test(node, addr, network=NET_UNROUTABLE) return rv def run_test(self): # basic -proxy self.node_test( self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) # -proxy plus -onion self.node_test( self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) # -proxy plus -onion, -proxyrandomize rv = self.node_test( self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) # Check that credentials as used for -proxyrandomize connections are # unique - credentials = set((x.username, x.password) for x in rv) + credentials = {(x.username, x.password) for x in rv} assert_equal(len(credentials), len(rv)) if self.have_ipv6: # proxy on IPv6 localhost self.node_test( self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) def networks_dict(d): r = {} for x in d['networks']: r[x['name']] = x return r self.log.info("Test RPC getnetworkinfo") n0 = networks_dict(self.nodes[0].getnetworkinfo()) assert_equal(NETWORKS, n0.keys()) ip1, port1 = self.conf1.addr ip2, port2 = self.conf2.addr for net in NETWORKS: if net == NET_I2P: expected_proxy = '' expected_randomize = False else: expected_proxy = f'{ip1}:{port1}' expected_randomize = True assert_equal(n0[net]['proxy'], expected_proxy) assert_equal( n0[net]['proxy_randomize_credentials'], expected_randomize) assert_equal(n0['onion']['reachable'], True) assert_equal(n0['i2p']['reachable'], False) n1 = networks_dict(self.nodes[1].getnetworkinfo()) assert_equal(NETWORKS, n1.keys()) for net in ['ipv4', 'ipv6']: assert_equal(n1[net]['proxy'], f'{ip1}:{port1}') assert_equal(n1[net]['proxy_randomize_credentials'], False) assert_equal(n1['onion']['proxy'], f'{ip2}:{port2}') assert_equal(n1['onion']['proxy_randomize_credentials'], False) assert_equal(n1['onion']['reachable'], True) assert_equal(n1['i2p']['proxy'], f'{self.i2p_sam[0]}:{self.i2p_sam[1]}') assert_equal(n1['i2p']['proxy_randomize_credentials'], False) assert_equal(n1['i2p']['reachable'], True) n2 = networks_dict(self.nodes[2].getnetworkinfo()) assert_equal(NETWORKS, n2.keys()) for net in NETWORKS: if net == NET_I2P: expected_proxy = '' expected_randomize = False else: expected_proxy = f'{ip2}:{port2}' expected_randomize = True assert_equal(n2[net]['proxy'], expected_proxy) assert_equal( n2[net]['proxy_randomize_credentials'], expected_randomize) assert_equal(n2['onion']['reachable'], True) assert_equal(n2['i2p']['reachable'], False) if self.have_ipv6: n3 = networks_dict(self.nodes[3].getnetworkinfo()) assert_equal(NETWORKS, n3.keys()) for net in NETWORKS: if net == NET_I2P: expected_proxy = '' else: expected_proxy = f'[{self.conf3.addr[0]}]:{self.conf3.addr[1]}' assert_equal(n3[net]['proxy'], expected_proxy) assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) assert_equal(n3['i2p']['reachable'], False) if __name__ == '__main__': ProxyTest().main() diff --git a/test/functional/feature_utxo_set_hash.py b/test/functional/feature_utxo_set_hash.py index e347f8e27..3b072abca 100755 --- a/test/functional/feature_utxo_set_hash.py +++ b/test/functional/feature_utxo_set_hash.py @@ -1,92 +1,91 @@ #!/usr/bin/env python3 # Copyright (c) 2020-2021 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
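The feature_proxy.py hunk above rewrites set((x.username, x.password) for x in rv) as a set comprehension, and the feature_utxo_set_hash.py hunk that follows replaces a list(map(lambda ...)) call with a list comprehension. Both rewrites are behaviour-preserving; the sketch below is only an illustration of that equivalence, using stand-in data rather than the Socks5Command objects or block hex from the tests.

    # Hedged illustration only: Cmd/rv are stand-ins, not the test's objects.
    from collections import namedtuple

    Cmd = namedtuple("Cmd", ["username", "password"])
    rv = [Cmd(b"user0", b"pass0"), Cmd(b"user1", b"pass1")]

    # Old form: generator expression passed to the set() constructor.
    credentials_old = set((x.username, x.password) for x in rv)
    # New form: set comprehension, same elements, one fewer call.
    credentials_new = {(x.username, x.password) for x in rv}
    assert credentials_old == credentials_new

    # Same idea for list(map(lambda ...)): a comprehension drops the lambda.
    hex_blocks = ["00", "01", "02"]
    decoded_old = list(map(lambda h: bytes.fromhex(h), hex_blocks))
    decoded_new = [bytes.fromhex(h) for h in hex_blocks]
    assert decoded_old == decoded_new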
"""Test UTXO set hash value calculation in gettxoutsetinfo.""" import struct from test_framework.blocktools import create_transaction from test_framework.messages import CBlock, COutPoint, FromHex from test_framework.muhash import MuHash3072 from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class UTXOSetHashTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def test_deterministic_hash_results(self): self.log.info("Test deterministic UTXO set hash results") # These depend on the setup_clean_chain option, the chain loaded from # the cache assert_equal( self.nodes[0].gettxoutsetinfo()['hash_serialized'], "b32ec1dda5a53cd025b95387aad344a801825fe46a60ff952ce26528f01d3be8") assert_equal( self.nodes[0].gettxoutsetinfo("muhash")['muhash'], "dd5ad2a105c2d29495f577245c357409002329b9f4d6182c0af3dc2f462555c8") def test_muhash_implementation(self): self.log.info("Test MuHash implementation consistency") node = self.nodes[0] # Generate 100 blocks and remove the first since we plan to spend its # coinbase block_hashes = self.generate(node, 100) - blocks = list(map( - lambda block: FromHex(CBlock(), node.getblock(block, False)), - block_hashes)) + blocks = [ + FromHex(CBlock(), node.getblock(block, False)) for block in block_hashes] spending = blocks.pop(0) # Create a spending transaction and mine a block which includes it tx = create_transaction( node, spending.vtx[0].rehash(), node.getnewaddress(), amount=49_000_000) txid = node.sendrawtransaction( hexstring=tx.serialize().hex(), maxfeerate=0) tx_block = self.generateblock(node, output=node.getnewaddress(), transactions=[txid]) blocks.append( FromHex(CBlock(), node.getblock(tx_block['hash'], False))) # Serialize the outputs that should be in the UTXO set and add them to # a MuHash object muhash = MuHash3072() for height, block in enumerate(blocks): # The Genesis block coinbase is not part of the UTXO set and we # spent the first mined block height += 2 for tx in block.vtx: for n, tx_out in enumerate(tx.vout): coinbase = 1 if not tx.vin[0].prevout.hash else 0 data = COutPoint(int(tx.rehash(), 16), n).serialize() data += struct.pack(" typedef signed long long i64; #define MAX_AMOUNT_LENGTH 22 struct utxocache_change { char txid[32]; u32 index; u32 height; char value[MAX_AMOUNT_LENGTH]; bool is_coinbase; }; BPF_PERF_OUTPUT(utxocache_add); int trace_utxocache_add(struct pt_regs *ctx) { struct utxocache_change add = {}; bpf_usdt_readarg_p(1, ctx, &add.txid, 32); bpf_usdt_readarg(2, ctx, &add.index); bpf_usdt_readarg(3, ctx, &add.height); bpf_usdt_readarg_p(4, ctx, &add.value, MAX_AMOUNT_LENGTH); bpf_usdt_readarg(5, ctx, &add.is_coinbase); utxocache_add.perf_submit(ctx, &add, sizeof(add)); return 0; } BPF_PERF_OUTPUT(utxocache_spent); int trace_utxocache_spent(struct pt_regs *ctx) { struct utxocache_change spent = {}; bpf_usdt_readarg_p(1, ctx, &spent.txid, 32); bpf_usdt_readarg(2, ctx, &spent.index); bpf_usdt_readarg(3, ctx, &spent.height); bpf_usdt_readarg_p(4, ctx, &spent.value, MAX_AMOUNT_LENGTH); bpf_usdt_readarg(5, ctx, &spent.is_coinbase); utxocache_spent.perf_submit(ctx, &spent, sizeof(spent)); return 0; } BPF_PERF_OUTPUT(utxocache_uncache); int trace_utxocache_uncache(struct pt_regs *ctx) { struct utxocache_change uncache = {}; bpf_usdt_readarg_p(1, ctx, &uncache.txid, 32); bpf_usdt_readarg(2, ctx, &uncache.index); bpf_usdt_readarg(3, ctx, &uncache.height); bpf_usdt_readarg_p(4, 
ctx, &uncache.value, MAX_AMOUNT_LENGTH); bpf_usdt_readarg(5, ctx, &uncache.is_coinbase); utxocache_uncache.perf_submit(ctx, &uncache, sizeof(uncache)); return 0; } """ utxocache_flushes_program = """ #include typedef signed long long i64; struct utxocache_flush { i64 duration; u32 mode; u64 size; u64 memory; bool for_prune; }; BPF_PERF_OUTPUT(utxocache_flush); int trace_utxocache_flush(struct pt_regs *ctx) { struct utxocache_flush flush = {}; bpf_usdt_readarg(1, ctx, &flush.duration); bpf_usdt_readarg(2, ctx, &flush.mode); bpf_usdt_readarg(3, ctx, &flush.size); bpf_usdt_readarg(4, ctx, &flush.memory); bpf_usdt_readarg(5, ctx, &flush.for_prune); utxocache_flush.perf_submit(ctx, &flush, sizeof(flush)); return 0; } """ FLUSHMODE_NAME = { 0: "NONE", 1: "IF_NEEDED", 2: "PERIODIC", 3: "ALWAYS", } class UTXOCacheChange(ctypes.Structure): _fields_ = [ ("txid", ctypes.c_ubyte * 32), ("index", ctypes.c_uint32), ("height", ctypes.c_uint32), ("value", ctypes.c_ubyte * MAX_AMOUNT_LENGTH), ("is_coinbase", ctypes.c_bool), ] def __repr__(self): return f"UTXOCacheChange(outpoint={bytes(self.txid[::-1]).hex()}:{self.index}, height={self.height}, value={self.value}, is_coinbase={self.is_coinbase})" class UTXOCacheFlush(ctypes.Structure): _fields_ = [ ("duration", ctypes.c_int64), ("mode", ctypes.c_uint32), ("size", ctypes.c_uint64), ("memory", ctypes.c_uint64), ("for_prune", ctypes.c_bool), ] def __repr__(self): return f"UTXOCacheFlush(duration={self.duration}, mode={FLUSHMODE_NAME[self.mode]}, size={self.size}, memory={self.memory}, for_prune={self.for_prune})" def c_string_to_str(c_string): return bytes(c_string).split(b'\x00', 1)[0].decode('ascii') class UTXOCacheTracepointTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = False self.num_nodes = 1 self.extra_args = [["-txindex"]] def skip_test_if_missing_module(self): self.skip_if_platform_not_linux() self.skip_if_no_bitcoind_tracepoints() self.skip_if_no_python_bcc() self.skip_if_no_bpf_permissions() def run_test(self): self.wallet = MiniWallet(self.nodes[0]) self.generate(self.wallet, 101) self.test_uncache() self.test_add_spent() self.test_flush() def test_uncache(self): """ Tests the utxocache:uncache tracepoint API. https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#tracepoint-utxocacheuncache """ # To trigger an UTXO uncache from the cache, we create an invalid transaction # spending a not-cached, but existing UTXO. During transaction validation, this # the UTXO is added to the utxo cache, but as the transaction is invalid, it's # uncached again. self.log.info("testing the utxocache:uncache tracepoint API") # Retrieve the txid for the UTXO created in the first block. This UTXO is not # in our UTXO cache. EARLY_BLOCK_HEIGHT = 1 block_1_hash = self.nodes[0].getblockhash(EARLY_BLOCK_HEIGHT) block_1 = self.nodes[0].getblock(block_1_hash) block_1_coinbase_txid = block_1["tx"][0] # Create a transaction and invalidate it by changing the txid of the previous # output to the coinbase txid of the block at height 1. invalid_tx = self.wallet.create_self_transfer( from_node=self.nodes[0])["tx"] invalid_tx.vin[0].prevout.hash = int(block_1_coinbase_txid, 16) self.log.info("hooking into the utxocache:uncache tracepoint") ctx = USDT(pid=self.nodes[0].process.pid) ctx.enable_probe(probe="utxocache:uncache", fn_name="trace_utxocache_uncache") bpf = BPF(text=utxocache_changes_program, usdt_contexts=[ctx], debug=0) # The handle_* function is a ctypes callback function called from C. 
When # we assert in the handle_* function, the AssertError doesn't propagate # back to Python. The exception is ignored. We manually count and assert # that the handle_* functions succeeded. EXPECTED_HANDLE_UNCACHE_SUCCESS = 1 handle_uncache_succeeds = 0 def handle_utxocache_uncache(_, data, __): nonlocal handle_uncache_succeeds event = ctypes.cast(data, ctypes.POINTER(UTXOCacheChange)).contents self.log.info(f"handle_utxocache_uncache(): {event}") assert_equal(block_1_coinbase_txid, bytes(event.txid[::-1]).hex()) assert_equal(0, event.index) # prevout index assert_equal(EARLY_BLOCK_HEIGHT, event.height) assert_equal("50000000.00 XEC", c_string_to_str(event.value)) assert_equal(True, event.is_coinbase) handle_uncache_succeeds += 1 bpf["utxocache_uncache"].open_perf_buffer(handle_utxocache_uncache) self.log.info( "testmempoolaccept the invalid transaction to trigger an UTXO-cache uncache") result = self.nodes[0].testmempoolaccept( [invalid_tx.serialize().hex()])[0] assert_equal(result["allowed"], False) bpf.perf_buffer_poll(timeout=100) bpf.cleanup() self.log.info( f"check that we successfully traced {EXPECTED_HANDLE_UNCACHE_SUCCESS} uncaches") assert_equal(EXPECTED_HANDLE_UNCACHE_SUCCESS, handle_uncache_succeeds) def test_add_spent(self): """ Tests the utxocache:add utxocache:spent tracepoint API See https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#tracepoint-utxocacheadd and https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#tracepoint-utxocachespent """ self.log.info( "test the utxocache:add and utxocache:spent tracepoint API") self.log.info("create an unconfirmed transaction") self.wallet.send_self_transfer(from_node=self.nodes[0]) # We mine a block to trace changes (add/spent) to the active in-memory cache # of the UTXO set (see CoinsTip() of CCoinsViewCache). However, in some cases # temporary clones of the active cache are made. For example, during mining with # the generate RPC call, the block is first tested in TestBlockValidity(). There, # a clone of the active cache is modified during a test ConnectBlock() call. # These are implementation details we don't want to test here. Thus, after # mining, we invalidate the block, start the tracing, and then trace the cache # changes to the active utxo cache. self.log.info("mine and invalidate a block that is later reconsidered") block_hash = self.generate(self.wallet, 1)[0] self.nodes[0].invalidateblock(block_hash) self.log.info( "hook into the utxocache:add and utxocache:spent tracepoints") ctx = USDT(pid=self.nodes[0].process.pid) ctx.enable_probe(probe="utxocache:add", fn_name="trace_utxocache_add") ctx.enable_probe(probe="utxocache:spent", fn_name="trace_utxocache_spent") bpf = BPF(text=utxocache_changes_program, usdt_contexts=[ctx], debug=0) # The handle_* function is a ctypes callback function called from C. When # we assert in the handle_* function, the AssertError doesn't propagate # back to Python. The exception is ignored. We manually count and assert # that the handle_* functions succeeded. 
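The comment above explains why these tracepoint tests count successful handler runs and assert afterwards: bcc calls the handler through a ctypes callback, so an AssertionError raised inside it never reaches the test. The sketch below mirrors that pattern outside bcc; fake_perf_buffer_poll and its names are hypothetical stand-ins for bpf.perf_buffer_poll(), chosen only to show the shape of the counting idiom.

    # Hedged sketch of the "count successes, assert afterwards" pattern.
    def fake_perf_buffer_poll(callback, events):
        for event in events:
            try:
                callback(event)
            except AssertionError:
                # Stands in for the real behaviour: the error never reaches
                # the caller, so the test would not fail here.
                pass

    def run_check(events, expected_value):
        handled_ok = 0

        def handler(event):
            nonlocal handled_ok
            # Would be swallowed if it failed; hence the counter below.
            assert event == expected_value
            handled_ok += 1

        fake_perf_buffer_poll(handler, events)
        # The assertion that actually fails the test runs in normal control flow.
        assert handled_ok == len(events)

    run_check([42, 42, 42], expected_value=42)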
EXPECTED_HANDLE_ADD_SUCCESS = 2 EXPECTED_HANDLE_SPENT_SUCCESS = 1 handle_add_succeeds = 0 handle_spent_succeeds = 0 expected_utxocache_spents = [] expected_utxocache_adds = [] def handle_utxocache_add(_, data, __): nonlocal handle_add_succeeds event = ctypes.cast(data, ctypes.POINTER(UTXOCacheChange)).contents self.log.info(f"handle_utxocache_add(): {event}") add = expected_utxocache_adds.pop(0) assert_equal(add["txid"], bytes(event.txid[::-1]).hex()) assert_equal(add["index"], event.index) assert_equal(add["height"], event.height) assert_equal(add["value"], c_string_to_str(event.value)) assert_equal(add["is_coinbase"], event.is_coinbase) handle_add_succeeds += 1 def handle_utxocache_spent(_, data, __): nonlocal handle_spent_succeeds event = ctypes.cast(data, ctypes.POINTER(UTXOCacheChange)).contents self.log.info(f"handle_utxocache_spent(): {event}") spent = expected_utxocache_spents.pop(0) assert_equal(spent["txid"], bytes(event.txid[::-1]).hex()) assert_equal(spent["index"], event.index) assert_equal(spent["height"], event.height) assert_equal(spent["value"], c_string_to_str(event.value)) assert_equal(spent["is_coinbase"], event.is_coinbase) handle_spent_succeeds += 1 bpf["utxocache_add"].open_perf_buffer(handle_utxocache_add) bpf["utxocache_spent"].open_perf_buffer(handle_utxocache_spent) # We trigger a block re-connection. This causes changes (add/spent) # to the UTXO-cache which in turn triggers the tracepoints. self.log.info("reconsider the previously invalidated block") self.nodes[0].reconsiderblock(block_hash) block = self.nodes[0].getblock(block_hash, 2) for (block_index, tx) in enumerate(block["tx"]): for vin in tx["vin"]: if "coinbase" not in vin: prevout_tx = self.nodes[0].getrawtransaction( vin["txid"], True) prevout_tx_block = self.nodes[0].getblockheader( prevout_tx["blockhash"]) spends_coinbase = "coinbase" in prevout_tx["vin"][0] expected_utxocache_spents.append({ "txid": vin["txid"], "index": vin["vout"], "height": prevout_tx_block["height"], "value": f"{int(prevout_tx['vout'][vin['vout']]['value']):.2f} XEC", "is_coinbase": spends_coinbase, }) for (i, vout) in enumerate(tx["vout"]): if vout["scriptPubKey"]["type"] != "nulldata": expected_utxocache_adds.append({ "txid": tx["txid"], "index": i, "height": block["height"], "value": f"{int(vout['value']):.2f} XEC", "is_coinbase": block_index == 0, }) assert_equal(EXPECTED_HANDLE_ADD_SUCCESS, len(expected_utxocache_adds)) assert_equal(EXPECTED_HANDLE_SPENT_SUCCESS, len(expected_utxocache_spents)) bpf.perf_buffer_poll(timeout=200) bpf.cleanup() self.log.info( f"check that we successfully traced {EXPECTED_HANDLE_ADD_SUCCESS} adds and {EXPECTED_HANDLE_SPENT_SUCCESS} spent") assert_equal(0, len(expected_utxocache_adds)) assert_equal(0, len(expected_utxocache_spents)) assert_equal(EXPECTED_HANDLE_ADD_SUCCESS, handle_add_succeeds) assert_equal(EXPECTED_HANDLE_SPENT_SUCCESS, handle_spent_succeeds) def test_flush(self): """ Tests the utxocache:flush tracepoint API. See https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#tracepoint-utxocacheflush""" self.log.info("test the utxocache:flush tracepoint API") self.log.info("hook into the utxocache:flush tracepoint") ctx = USDT(pid=self.nodes[0].process.pid) ctx.enable_probe(probe="utxocache:flush", fn_name="trace_utxocache_flush") bpf = BPF(text=utxocache_flushes_program, usdt_contexts=[ctx], debug=0) # The handle_* function is a ctypes callback function called from C. When # we assert in the handle_* function, the AssertError doesn't propagate # back to Python. 
The exception is ignored. We manually count and assert # that the handle_* functions succeeded. EXPECTED_HANDLE_FLUSH_SUCCESS = 3 handle_flush_succeeds = 0 - expected_flushes = list() + expected_flushes = [] def handle_utxocache_flush(_, data, __): nonlocal handle_flush_succeeds event = ctypes.cast(data, ctypes.POINTER(UTXOCacheFlush)).contents self.log.info(f"handle_utxocache_flush(): {event}") expected_flushes.remove({ "mode": FLUSHMODE_NAME[event.mode], "for_prune": event.for_prune, "size": event.size }) # sanity checks only assert event.memory > 0 assert event.duration > 0 handle_flush_succeeds += 1 bpf["utxocache_flush"].open_perf_buffer(handle_utxocache_flush) self.log.info("stop the node to flush the UTXO cache") # might need to be changed if the earlier tests are modified UTXOS_IN_CACHE = 2 # A node shutdown causes two flushes. One that flushes UTXOS_IN_CACHE # UTXOs and one that flushes 0 UTXOs. Normally the 0-UTXO-flush is the # second flush, however it can happen that the order changes. expected_flushes.append( {"mode": "ALWAYS", "for_prune": False, "size": UTXOS_IN_CACHE}) expected_flushes.append( {"mode": "ALWAYS", "for_prune": False, "size": 0}) self.stop_node(0) bpf.perf_buffer_poll(timeout=200) bpf.cleanup() self.log.info("check that we don't expect additional flushes") assert_equal(0, len(expected_flushes)) self.log.info("restart the node with -prune") self.start_node(0, ["-fastprune=1", "-prune=1"]) BLOCKS_TO_MINE = 350 self.log.info(f"mine {BLOCKS_TO_MINE} blocks to be able to prune") self.generate(self.wallet, BLOCKS_TO_MINE) self.log.info("test the utxocache:flush tracepoint API with pruning") self.log.info("hook into the utxocache:flush tracepoint") ctx = USDT(pid=self.nodes[0].process.pid) ctx.enable_probe(probe="utxocache:flush", fn_name="trace_utxocache_flush") bpf = BPF(text=utxocache_flushes_program, usdt_contexts=[ctx], debug=0) bpf["utxocache_flush"].open_perf_buffer(handle_utxocache_flush) self.log.info("prune blockchain to trigger a flush for pruning") expected_flushes.append({"mode": "NONE", "for_prune": True, "size": 0}) self.nodes[0].pruneblockchain(345) bpf.perf_buffer_poll(timeout=500) bpf.cleanup() self.log.info( "check that we don't expect additional flushes and that the handle_* function succeeded") assert_equal(0, len(expected_flushes)) assert_equal(EXPECTED_HANDLE_FLUSH_SUCCESS, handle_flush_succeeds) if __name__ == '__main__': UTXOCacheTracepointTest().main() diff --git a/test/functional/interface_usdt_validation.py b/test/functional/interface_usdt_validation.py index a1d125225..b041ce406 100755 --- a/test/functional/interface_usdt_validation.py +++ b/test/functional/interface_usdt_validation.py @@ -1,136 +1,136 @@ #!/usr/bin/env python3 # Copyright (c) 2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Tests the validation:* tracepoint API interface. 
See https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#context-validation """ import ctypes # Test will be skipped if we don't have bcc installed try: from bcc import BPF, USDT # type: ignore[import] except ImportError: pass from test_framework.address import ADDRESS_ECREG_UNSPENDABLE from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal validation_blockconnected_program = """ #include typedef signed long long i64; struct connected_block { char hash[32]; int height; i64 transactions; int inputs; i64 sigchk; u64 duration; }; BPF_PERF_OUTPUT(block_connected); int trace_block_connected(struct pt_regs *ctx) { struct connected_block block = {}; bpf_usdt_readarg_p(1, ctx, &block.hash, 32); bpf_usdt_readarg(2, ctx, &block.height); bpf_usdt_readarg(3, ctx, &block.transactions); bpf_usdt_readarg(4, ctx, &block.inputs); bpf_usdt_readarg(5, ctx, &block.sigchk); bpf_usdt_readarg(6, ctx, &block.duration); block_connected.perf_submit(ctx, &block, sizeof(block)); return 0; } """ class ValidationTracepointTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 def skip_test_if_missing_module(self): self.skip_if_platform_not_linux() self.skip_if_no_bitcoind_tracepoints() self.skip_if_no_python_bcc() self.skip_if_no_bpf_permissions() def run_test(self): # Tests the validation:block_connected tracepoint by generating blocks # and comparing the values passed in the tracepoint arguments with the # blocks. # See # https://github.com/bitcoin/bitcoin/blob/master/doc/tracing.md#tracepoint-validationblock_connected class Block(ctypes.Structure): _fields_ = [ ("hash", ctypes.c_ubyte * 32), ("height", ctypes.c_int), ("transactions", ctypes.c_int64), ("inputs", ctypes.c_int), ("sigchk", ctypes.c_int64), ("duration", ctypes.c_uint64), ] def __repr__(self): return ( f"ConnectedBlock(hash={bytes(self.hash[::-1]).hex()} " f"height={self.height}, transactions={self.transactions}, " f"inputs={self.inputs}, sigchk={self.sigchk}, " f"duration={self.duration})" ) # The handle_* function is a ctypes callback function called from C. When # we assert in the handle_* function, the AssertError doesn't propagate # back to Python. The exception is ignored. We manually count and assert # that the handle_* functions succeeded. 
BLOCKS_EXPECTED = 2 blocks_checked = 0 - expected_blocks = list() + expected_blocks = [] self.log.info("hook into the validation:block_connected tracepoint") ctx = USDT(pid=self.nodes[0].process.pid) ctx.enable_probe(probe="validation:block_connected", fn_name="trace_block_connected") bpf = BPF(text=validation_blockconnected_program, usdt_contexts=[ctx], debug=0) def handle_blockconnected(_, data, __): nonlocal expected_blocks, blocks_checked event = ctypes.cast(data, ctypes.POINTER(Block)).contents self.log.info(f"handle_blockconnected(): {event}") block = expected_blocks.pop(0) assert_equal(block["hash"], bytes(event.hash[::-1]).hex()) assert_equal(block["height"], event.height) assert_equal(len(block["tx"]), event.transactions) assert_equal(len([tx["vin"] for tx in block["tx"]]), event.inputs) # no sigchk in coinbase tx assert_equal(0, event.sigchk) # only plausibility checks assert event.duration > 0 blocks_checked += 1 bpf["block_connected"].open_perf_buffer( handle_blockconnected) self.log.info(f"mine {BLOCKS_EXPECTED} blocks") block_hashes = self.generatetoaddress( self.nodes[0], BLOCKS_EXPECTED, ADDRESS_ECREG_UNSPENDABLE) for block_hash in block_hashes: expected_blocks.append(self.nodes[0].getblock(block_hash, 2)) bpf.perf_buffer_poll(timeout=200) bpf.cleanup() self.log.info(f"check that we traced {BLOCKS_EXPECTED} blocks") assert_equal(BLOCKS_EXPECTED, blocks_checked) assert_equal(0, len(expected_blocks)) if __name__ == '__main__': ValidationTracepointTest().main() diff --git a/test/functional/p2p_addrv2_relay.py b/test/functional/p2p_addrv2_relay.py index 1fe1d3d5e..9ebac6dcc 100755 --- a/test/functional/p2p_addrv2_relay.py +++ b/test/functional/p2p_addrv2_relay.py @@ -1,74 +1,74 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
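The expected_flushes = list() and expected_blocks = list() changes above, like the dict(txid=..., ...) change later in rpc_rawtransaction.py, swap a constructor call for the equivalent literal. A minimal sketch of the equivalence, with placeholder values rather than the ones used in the tests:

    # Hedged illustration with placeholder values; the literal forms build
    # objects identical to the constructor calls they replace.
    expected_flushes_old = list()
    expected_flushes_new = []
    assert expected_flushes_old == expected_flushes_new

    txid = "00" * 32  # placeholder, not a real transaction id
    prevtx_old = dict(txid=txid, scriptPubKey="51", vout=3, amount=1)
    prevtx_new = {"txid": txid, "scriptPubKey": "51", "vout": 3, "amount": 1}
    assert prevtx_old == prevtx_new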
""" Test addrv2 relay """ import time from test_framework.messages import NODE_NETWORK, CAddress, msg_addrv2 from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework ADDRS = [] for i in range(10): addr = CAddress() addr.time = int(time.time()) + i addr.nServices = NODE_NETWORK addr.ip = f"123.123.123.{i % 256}" addr.port = 8333 + i ADDRS.append(addr) class AddrReceiver(P2PInterface): addrv2_received_and_checked = False def __init__(self): super().__init__(support_addrv2=True) def on_addrv2(self, message): - expected_set = set((addr.ip, addr.port) for addr in ADDRS) - received_set = set((addr.ip, addr.port) for addr in message.addrs) + expected_set = {(addr.ip, addr.port) for addr in ADDRS} + received_set = {(addr.ip, addr.port) for addr in message.addrs} if expected_set == received_set: self.addrv2_received_and_checked = True def wait_for_addrv2(self): self.wait_until(lambda: "addrv2" in self.last_message) class AddrTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [["-whitelist=addr@127.0.0.1"]] def run_test(self): self.log.info('Create connection that sends addrv2 messages') addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) msg = msg_addrv2() self.log.info('Send too-large addrv2 message') msg.addrs = ADDRS * 101 with self.nodes[0].assert_debug_log(['addrv2 message size = 1010']): addr_source.send_and_ping(msg) self.log.info( 'Check that addrv2 message content is relayed and added to addrman') addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) msg.addrs = ADDRS with self.nodes[0].assert_debug_log([ 'Added 10 addresses from 127.0.0.1: 0 tried', 'received: addrv2 (131 bytes) peer=0', 'sending addrv2 (131 bytes) peer=1', ]): addr_source.send_and_ping(msg) self.nodes[0].setmocktime(int(time.time()) + 30 * 60) addr_receiver.wait_for_addrv2() assert addr_receiver.addrv2_received_and_checked if __name__ == '__main__': AddrTest().main() diff --git a/test/functional/p2p_inv_download.py b/test/functional/p2p_inv_download.py index 29ba59dd4..48db7553e 100755 --- a/test/functional/p2p_inv_download.py +++ b/test/functional/p2p_inv_download.py @@ -1,477 +1,477 @@ #!/usr/bin/env python3 # Copyright (c) 2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
""" Test inventory download behavior """ import functools import time from test_framework.address import ADDRESS_ECREG_UNSPENDABLE from test_framework.avatools import avalanche_proof_from_hex, gen_proof, wait_for_proof from test_framework.key import ECKey from test_framework.messages import ( MSG_AVA_PROOF, MSG_TX, MSG_TYPE_MASK, CInv, CTransaction, FromHex, msg_avaproof, msg_inv, msg_notfound, ) from test_framework.p2p import P2PInterface, p2p_lock from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, uint256_hex from test_framework.wallet_util import bytes_to_wif class TestP2PConn(P2PInterface): def __init__(self, inv_type): super().__init__() self.inv_type = inv_type self.getdata_count = 0 def on_getdata(self, message): for i in message.inv: if i.type & MSG_TYPE_MASK == self.inv_type: self.getdata_count += 1 class NetConstants: """Constants from net_processing""" def __init__(self, getdata_interval, inbound_peer_delay, overloaded_peer_delay, max_getdata_in_flight, max_peer_announcements, bypass_request_limits_permission_flags, ): self.getdata_interval = getdata_interval self.inbound_peer_delay = inbound_peer_delay self.overloaded_peer_delay = overloaded_peer_delay self.max_getdata_in_flight = max_getdata_in_flight self.max_peer_announcements = max_peer_announcements self.max_getdata_inbound_wait = self.getdata_interval + self.inbound_peer_delay self.bypass_request_limits_permission_flags = bypass_request_limits_permission_flags class TestContext: def __init__(self, inv_type, inv_name, constants): self.inv_type = inv_type self.inv_name = inv_name self.constants = constants def p2p_conn(self): return TestP2PConn(self.inv_type) PROOF_TEST_CONTEXT = TestContext( MSG_AVA_PROOF, "avalanche proof", NetConstants( getdata_interval=60, # seconds inbound_peer_delay=2, # seconds overloaded_peer_delay=2, # seconds max_getdata_in_flight=100, max_peer_announcements=5000, bypass_request_limits_permission_flags="bypass_proof_request_limits", ), ) TX_TEST_CONTEXT = TestContext( MSG_TX, "transaction", NetConstants( getdata_interval=60, # seconds inbound_peer_delay=2, # seconds overloaded_peer_delay=2, # seconds max_getdata_in_flight=100, max_peer_announcements=5000, bypass_request_limits_permission_flags="relay", ), ) # Python test constants NUM_INBOUND = 10 # Common network parameters UNCONDITIONAL_RELAY_DELAY = 2 * 60 def skip(context): def decorator(test): @functools.wraps(test) def wrapper(*args, **kwargs): # Assume the signature is test(self, context) unless context is # passed by name call_context = kwargs.get("context", args[1]) if call_context == context: return lambda *args, **kwargs: None return test(*args, **kwargs) return wrapper return decorator class InventoryDownloadTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [['-avaproofstakeutxodustthreshold=1000000', '-avaproofstakeutxoconfirmations=1', '-avacooldown=0']] * self.num_nodes def test_data_requests(self, context): self.log.info( "Test that we request data from all our peers, eventually") invid = 0xdeadbeef self.log.info("Announce the invid from each incoming peer to node 0") msg = msg_inv([CInv(t=context.inv_type, h=invid)]) for p in self.nodes[0].p2ps: p.send_and_ping(msg) - outstanding_peer_index = [i for i in range(len(self.nodes[0].p2ps))] + outstanding_peer_index = list(range(len(self.nodes[0].p2ps))) def getdata_found(peer_index): p = self.nodes[0].p2ps[peer_index] with p2p_lock: return p.last_message.get( 
"getdata") and p.last_message["getdata"].inv[-1].hash == invid node_0_mocktime = int(time.time()) while outstanding_peer_index: node_0_mocktime += context.constants.max_getdata_inbound_wait self.nodes[0].setmocktime(node_0_mocktime) self.wait_until(lambda: any(getdata_found(i) for i in outstanding_peer_index)) for i in outstanding_peer_index: if getdata_found(i): outstanding_peer_index.remove(i) self.nodes[0].setmocktime(0) self.log.info("All outstanding peers received a getdata") @skip(PROOF_TEST_CONTEXT) def test_inv_tx(self, context): self.log.info("Generate a transaction on node 0") tx = self.nodes[0].createrawtransaction( inputs=[{ # coinbase "txid": self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0], "vout": 0 }], outputs={ADDRESS_ECREG_UNSPENDABLE: 50000000 - 250.00}, ) tx = self.nodes[0].signrawtransactionwithkey( hexstring=tx, privkeys=[self.nodes[0].get_deterministic_priv_key().key], )['hex'] ctx = FromHex(CTransaction(), tx) txid = int(ctx.rehash(), 16) self.log.info( f"Announce the transaction to all nodes from all {NUM_INBOUND} incoming " "peers, but never send it") msg = msg_inv([CInv(t=context.inv_type, h=txid)]) for p in self.peers: p.send_and_ping(msg) self.log.info("Put the tx in node 0's mempool") self.nodes[0].sendrawtransaction(tx) # node1 is an inbound peer for node0, so the tx relay is delayed by a # duration calculated using a poisson's law with a 5s average time. # In order to make sure the inv is sent we move the time 2 minutes # forward, which has the added side effect that the tx can be # unconditionally requested. with self.nodes[1].assert_debug_log( [f"got inv: tx {uint256_hex(txid)} new peer=0"]): self.nodes[0].setmocktime( int(time.time()) + UNCONDITIONAL_RELAY_DELAY) # Since node 1 is connected outbound to an honest peer (node 0), it # should get the tx within a timeout. 
# The timeout is the sum of # * the worst case until the tx is first requested from an inbound # peer, plus # * the first time it is re-requested from the outbound peer, plus # * 2 seconds to avoid races assert self.nodes[1].getpeerinfo()[0]['inbound'] is False max_delay = context.constants.inbound_peer_delay + \ context.constants.getdata_interval margin = 2 self.log.info( f"Tx should be received at node 1 after {max_delay + margin} seconds") self.nodes[1].setmocktime(int(time.time()) + max_delay) self.sync_mempools(timeout=margin) def test_in_flight_max(self, context): max_getdata_in_flight = context.constants.max_getdata_in_flight max_inbound_delay = context.constants.inbound_peer_delay + \ context.constants.overloaded_peer_delay self.log.info( f"Test that we don't load peers with more than {max_getdata_in_flight} " "getdata requests immediately") - invids = [i for i in range(max_getdata_in_flight + 2)] + invids = list(range(max_getdata_in_flight + 2)) p = self.nodes[0].p2ps[0] with p2p_lock: p.getdata_count = 0 mock_time = int(time.time() + 1) self.nodes[0].setmocktime(mock_time) for i in range(max_getdata_in_flight): p.send_message(msg_inv([CInv(t=context.inv_type, h=invids[i])])) p.sync_with_ping() mock_time += context.constants.inbound_peer_delay self.nodes[0].setmocktime(mock_time) p.wait_until(lambda: p.getdata_count >= max_getdata_in_flight) for i in range(max_getdata_in_flight, len(invids)): p.send_message(msg_inv([CInv(t=context.inv_type, h=invids[i])])) p.sync_with_ping() self.log.info( f"No more than {max_getdata_in_flight} requests should be seen within " f"{max_inbound_delay - 1} seconds after announcement") self.nodes[0].setmocktime( mock_time + max_inbound_delay - 1) p.sync_with_ping() with p2p_lock: assert_equal(p.getdata_count, max_getdata_in_flight) self.log.info( f"If we wait {max_inbound_delay} seconds after announcement, we should " f"eventually get more requests") self.nodes[0].setmocktime( mock_time + max_inbound_delay) p.wait_until(lambda: p.getdata_count == len(invids)) def test_expiry_fallback(self, context): self.log.info( 'Check that expiry will select another peer for download') peer1 = self.nodes[0].add_p2p_connection(context.p2p_conn()) peer2 = self.nodes[0].add_p2p_connection(context.p2p_conn()) for p in [peer1, peer2]: p.send_message(msg_inv([CInv(t=context.inv_type, h=0xffaa)])) # One of the peers is asked for the data peer2.wait_until( lambda: sum( p.getdata_count for p in [ peer1, peer2]) == 1) with p2p_lock: peer_expiry, peer_fallback = ( peer1, peer2) if peer1.getdata_count == 1 else ( peer2, peer1) assert_equal(peer_fallback.getdata_count, 0) # Wait for request to peer_expiry to expire self.nodes[0].setmocktime( int(time.time()) + context.constants.getdata_interval + 1) peer_fallback.wait_until( lambda: peer_fallback.getdata_count >= 1) with p2p_lock: assert_equal(peer_fallback.getdata_count, 1) # reset mocktime self.restart_node(0) def test_disconnect_fallback(self, context): self.log.info( 'Check that disconnect will select another peer for download') peer1 = self.nodes[0].add_p2p_connection(context.p2p_conn()) peer2 = self.nodes[0].add_p2p_connection(context.p2p_conn()) for p in [peer1, peer2]: p.send_message(msg_inv([CInv(t=context.inv_type, h=0xffbb)])) # One of the peers is asked for the data peer2.wait_until( lambda: sum( p.getdata_count for p in [ peer1, peer2]) == 1) with p2p_lock: peer_disconnect, peer_fallback = ( peer1, peer2) if peer1.getdata_count == 1 else ( peer2, peer1) assert_equal(peer_fallback.getdata_count, 0) 
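The invids change above, like the outstanding_peer_index change in test_data_requests, replaces an identity comprehension over range() with a direct list(range()) call; the resulting list is the same, so only readability changes. A tiny sketch with an arbitrary stand-in bound:

    # Hedged illustration; n stands in for max_getdata_in_flight.
    n = 100
    invids_old = [i for i in range(n + 2)]
    invids_new = list(range(n + 2))
    assert invids_old == invids_new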
peer_disconnect.peer_disconnect() peer_disconnect.wait_for_disconnect() peer_fallback.wait_until( lambda: peer_fallback.getdata_count >= 1) with p2p_lock: assert_equal(peer_fallback.getdata_count, 1) def test_notfound_fallback(self, context): self.log.info( 'Check that notfounds will select another peer for download immediately') peer1 = self.nodes[0].add_p2p_connection(context.p2p_conn()) peer2 = self.nodes[0].add_p2p_connection(context.p2p_conn()) for p in [peer1, peer2]: p.send_message(msg_inv([CInv(t=context.inv_type, h=0xffdd)])) # One of the peers is asked for the data peer2.wait_until( lambda: sum( p.getdata_count for p in [ peer1, peer2]) == 1) with p2p_lock: peer_notfound, peer_fallback = ( peer1, peer2) if peer1.getdata_count == 1 else ( peer2, peer1) assert_equal(peer_fallback.getdata_count, 0) # Send notfound, so that fallback peer is selected peer_notfound.send_and_ping(msg_notfound( vec=[CInv(context.inv_type, 0xffdd)])) peer_fallback.wait_until( lambda: peer_fallback.getdata_count >= 1) with p2p_lock: assert_equal(peer_fallback.getdata_count, 1) def test_preferred_inv(self, context): self.log.info( 'Check that invs from preferred peers are downloaded immediately') self.restart_node( 0, extra_args=self.extra_args[0] + ['-whitelist=noban@127.0.0.1']) peer = self.nodes[0].add_p2p_connection(context.p2p_conn()) peer.send_message(msg_inv([CInv(t=context.inv_type, h=0xff00ff00)])) peer.wait_until(lambda: peer.getdata_count >= 1) with p2p_lock: assert_equal(peer.getdata_count, 1) def test_large_inv_batch(self, context): max_peer_announcements = context.constants.max_peer_announcements net_permissions = context.constants.bypass_request_limits_permission_flags self.log.info( f'Test how large inv batches are handled with {net_permissions} permission') self.restart_node( 0, extra_args=self.extra_args[0] + [f'-whitelist={net_permissions}@127.0.0.1']) peer = self.nodes[0].add_p2p_connection(context.p2p_conn()) peer.send_message(msg_inv([CInv(t=context.inv_type, h=invid) for invid in range(max_peer_announcements + 1)])) peer.wait_until(lambda: peer.getdata_count == max_peer_announcements + 1) self.log.info( f'Test how large inv batches are handled without {net_permissions} permission') self.restart_node(0) peer = self.nodes[0].add_p2p_connection(context.p2p_conn()) peer.send_message(msg_inv([CInv(t=context.inv_type, h=invid) for invid in range(max_peer_announcements + 1)])) peer.wait_until(lambda: peer.getdata_count == max_peer_announcements) peer.sync_with_ping() with p2p_lock: assert_equal(peer.getdata_count, max_peer_announcements) def test_spurious_notfound(self, context): self.log.info('Check that spurious notfound is ignored') self.nodes[0].p2ps[0].send_message( msg_notfound(vec=[CInv(context.inv_type, 1)])) @skip(TX_TEST_CONTEXT) def test_immature_download(self, context): node = self.nodes[0] # Build a proof with immature utxos privkey, immature = gen_proof(self, node) proofid_hex = uint256_hex(immature.proofid) self.restart_node(0, extra_args=self.extra_args[0] + [ "-avaproofstakeutxoconfirmations=3", f"-avaproof={immature.serialize().hex()}", f"-avamasterkey={bytes_to_wif(privkey.get_bytes())}", ]) # Add an inbound so the node proof can be registered and advertised node.add_p2p_connection(P2PInterface()) self.generate(node, 1, sync_fun=self.no_op) wait_for_proof(node, proofid_hex, expect_status="immature") peer = node.add_p2p_connection(context.p2p_conn()) peer.send_message( msg_inv([CInv(t=context.inv_type, h=immature.proofid)])) # Give enough time for the node to eventually 
request the proof. node.setmocktime(int(time.time()) + context.constants.getdata_interval + 1) peer.sync_with_ping() assert_equal(peer.getdata_count, 0) @skip(TX_TEST_CONTEXT) def test_request_invalid_once(self, context): node = self.nodes[0] privkey = ECKey() privkey.generate() # Build an invalid proof (no stake) no_stake_hex = node.buildavalancheproof( 42, 2000000000, bytes_to_wif(privkey.get_bytes()), [] ) no_stake = avalanche_proof_from_hex(no_stake_hex) assert_raises_rpc_error(-8, "The proof is invalid: no-stake", node.verifyavalancheproof, no_stake_hex) # Send the proof msg = msg_avaproof() msg.proof = no_stake node.p2ps[0].send_message(msg) # Check we get banned node.p2ps[0].wait_for_disconnect() # Now that the node knows the proof is invalid, it should not be # requested anymore node.p2ps[1].send_message( msg_inv([CInv(t=context.inv_type, h=no_stake.proofid)])) # Give enough time for the node to eventually request the proof node.setmocktime(int(time.time()) + context.constants.getdata_interval + 1) node.p2ps[1].sync_with_ping() assert all(p.getdata_count == 0 for p in node.p2ps[1:]) def run_test(self): for context in [TX_TEST_CONTEXT, PROOF_TEST_CONTEXT]: self.log.info( f"Starting tests using {context.inv_name} inventory type") # Run tests without mocktime that only need one peer-connection first, # to avoid restarting the nodes self.test_expiry_fallback(context) self.test_disconnect_fallback(context) self.test_notfound_fallback(context) self.test_preferred_inv(context) self.test_large_inv_batch(context) self.test_spurious_notfound(context) # Run each test against new bitcoind instances, as setting mocktimes has long-term effects on when # the next trickle relay event happens. for test in [self.test_in_flight_max, self.test_inv_tx, self.test_data_requests, self.test_immature_download, self.test_request_invalid_once]: self.stop_nodes() self.start_nodes() self.connect_nodes(1, 0) # Setup the p2p connections self.peers = [] for node in self.nodes: for _ in range(NUM_INBOUND): self.peers.append( node.add_p2p_connection( context.p2p_conn())) self.log.info( f"Nodes are setup with {NUM_INBOUND} incoming connections each") test(context) if __name__ == '__main__': InventoryDownloadTest().main() diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py index 84b9798e9..3875eeb58 100755 --- a/test/functional/rpc_rawtransaction.py +++ b/test/functional/rpc_rawtransaction.py @@ -1,726 +1,726 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the rawtranscation RPCs. Test the following RPCs: - createrawtransaction - signrawtransactionwithwallet - sendrawtransaction - decoderawtransaction - getrawtransaction """ from collections import OrderedDict from decimal import Decimal from io import BytesIO from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut, ToHex from test_framework.script import CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_raw_tx from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, find_vout_for_address, ) class multidict(dict): """Dictionary that allows duplicate keys. Constructed with a list of (key, value) tuples. 
When dumped by the json module, will output invalid json with repeated keys, eg: >>> json.dumps(multidict([(1,2),(1,2)]) '{"1": 2, "1": 2}' Used to test calls to rpc methods with repeated keys in the json object.""" def __init__(self, x): dict.__init__(self, x) self.x = x def items(self): return self.x # Create one-input, one-output, no-fee transaction: class RawTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [["-txindex"], ["-txindex"], ["-txindex"]] # whitelist all peers to speed up tx relay / mempool sync for args in self.extra_args: args.append("-whitelist=noban@127.0.0.1") self.supports_cli = False def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): super().setup_network() self.connect_nodes(0, 2) def run_test(self): self.log.info( 'prepare some coins for multiple *rawtransaction commands') self.generate(self.nodes[2], 1) self.generate(self.nodes[0], 101) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1500000) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1000000) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5000000) self.sync_all() self.generate(self.nodes[0], 5) self.log.info( 'Test getrawtransaction on genesis block coinbase returns an error') block = self.nodes[0].getblock(self.nodes[0].getblockhash(0)) assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot']) self.log.info( 'Check parameter types and required parameters of createrawtransaction') # Test `createrawtransaction` required parameters assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction) assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, []) # Test `createrawtransaction` invalid extra parameters assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, 'foo') # Test `createrawtransaction` invalid `inputs` txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000' assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {}) assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {}) assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {}) assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {}) assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {}) assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {}) assert_raises_rpc_error(-8, "Invalid parameter, vout must be a number", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {}) assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {}) assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {}) # Test `createrawtransaction` invalid `outputs` address = self.nodes[0].getnewaddress() address2 = 
self.nodes[0].getnewaddress() assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo') # Should not throw for backwards compatibility self.nodes[0].createrawtransaction(inputs=[], outputs={}) self.nodes[0].createrawtransaction(inputs=[], outputs=[]) assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'}) assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0}) assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'}) assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1}) assert_raises_rpc_error(-8, f"Invalid parameter, duplicated address: {address}", self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)])) assert_raises_rpc_error(-8, f"Invalid parameter, duplicated address: {address}", self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}]) assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}]) assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")])) assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}]) assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']]) # Test `createrawtransaction` invalid `locktime` assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo') assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1) assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296) self.log.info( 'Check that createrawtransaction accepts an array and object as outputs') tx = CTransaction() # One output tx.deserialize(BytesIO(bytes.fromhex(self.nodes[2].createrawtransaction( inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99})))) assert_equal(len(tx.vout), 1) assert_equal( tx.serialize().hex(), self.nodes[2].createrawtransaction( inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]), ) # Two outputs tx.deserialize(BytesIO(bytes.fromhex(self.nodes[2].createrawtransaction(inputs=[ {'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)]))))) assert_equal(len(tx.vout), 2) assert_equal( tx.serialize().hex(), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[ {address: 99}, {address2: 99}]), ) # Multiple mixed outputs tx.deserialize(BytesIO(bytes.fromhex(self.nodes[2].createrawtransaction(inputs=[ {'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')]))))) assert_equal(len(tx.vout), 3) assert_equal( tx.serialize().hex(), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[ {address: 99}, {address2: 99}, {'data': '99'}]), ) for type in ["legacy"]: addr = self.nodes[0].getnewaddress("", type) addrinfo = self.nodes[0].getaddressinfo(addr) pubkey = addrinfo["scriptPubKey"] self.log.info( f'sendrawtransaction with missing prevtx info ({type})') # Test `signrawtransactionwithwallet` invalid `prevtxs` inputs = 
[{'txid': txid, 'vout': 3, 'sequence': 1000}] outputs = {self.nodes[0].getnewaddress(): 1} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) - prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1) + prevtx = {"txid": txid, "scriptPubKey": pubkey, "vout": 3, "amount": 1} succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx]) assert succ["complete"] assert_raises_rpc_error(-8, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [ { "txid": txid, "scriptPubKey": pubkey, "vout": 3, } ]) assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [ { "txid": txid, "scriptPubKey": pubkey, "amount": 1, } ]) assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [ { "scriptPubKey": pubkey, "vout": 3, "amount": 1, } ]) assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [ { "txid": txid, "vout": 3, "amount": 1 } ]) ######################################### # sendrawtransaction with missing input # ######################################### self.log.info('sendrawtransaction with missing input') # won't exists inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1}] outputs = {self.nodes[0].getnewaddress(): 4998000} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) rawtx = pad_raw_tx(rawtx) rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx) # This will raise an exception since there are missing inputs assert_raises_rpc_error(-25, "bad-txns-inputs-missingorspent", self.nodes[2].sendrawtransaction, rawtx['hex']) ##################################### # getrawtransaction with block hash # ##################################### # make a tx by sending then generate 2 blocks; block1 has the tx in it tx = self.nodes[2].sendtoaddress( self.nodes[1].getnewaddress(), 1000000) block1, block2 = self.generate(self.nodes[2], 2) self.sync_all() # We should be able to get the raw transaction by providing the correct # block gottx = self.nodes[0].getrawtransaction(tx, True, block1) assert_equal(gottx['txid'], tx) assert_equal(gottx['in_active_chain'], True) # We should not have the 'in_active_chain' flag when we don't provide a # block gottx = self.nodes[0].getrawtransaction(tx, True) assert_equal(gottx['txid'], tx) assert 'in_active_chain' not in gottx # We should not get the tx if we provide an unrelated block assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2) # An invalid block hash should raise the correct errors assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getrawtransaction, tx, True, True) assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar") assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234") assert_raises_rpc_error( -8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000") assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000") # Undo the blocks and check in_active_chain self.nodes[0].invalidateblock(block1) gottx = 
self.nodes[0].getrawtransaction( txid=tx, verbose=True, blockhash=block1) assert_equal(gottx['in_active_chain'], False) self.nodes[0].reconsiderblock(block1) assert_equal(self.nodes[0].getbestblockhash(), block2) if not self.options.descriptors: # The traditional multisig workflow does not work with descriptor # wallets so these are legacy only. # The multisig workflow with descriptor wallets uses PSBTs and is # tested elsewhere, no need to do them here. # # RAW TX MULTISIG TESTS # # # 2of2 test addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) # Tests for createmultisig and addmultisigaddress assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"]) # createmultisig can only take public keys self.nodes[0].createmultisig( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # addmultisigaddress can take both pubkeys and addresses so long as # they are in the wallet, which is tested here. assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr1])['address'] # use balance deltas instead of absolute values bal = self.nodes[2].getbalance() # send 1,200,000 XEC to msig adr txId = self.nodes[0].sendtoaddress(mSigObj, 1200000) self.sync_all() self.generate(self.nodes[0], 1) self.sync_all() # node2 has both keys of the 2of2 ms addr., tx should affect the # balance assert_equal(self.nodes[2].getbalance(), bal + Decimal('1200000.00')) # 2of3 test from different nodes bal = self.nodes[2].getbalance() addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr3 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) addr3Obj = self.nodes[2].getaddressinfo(addr3) mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']] )['address'] txId = self.nodes[0].sendtoaddress(mSigObj, 2200000) decTx = self.nodes[0].gettransaction(txId) rawTx = self.nodes[0].decoderawtransaction(decTx['hex']) self.sync_all() self.generate(self.nodes[0], 1) self.sync_all() # THIS IS AN INCOMPLETE FEATURE # NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND # COUNT AT BALANCE CALCULATION # for now, assume the funds of a 2of3 multisig tx are not marked as # spendable assert_equal(self.nodes[2].getbalance(), bal) txDetails = self.nodes[0].gettransaction(txId, True) rawTx = self.nodes[0].decoderawtransaction(txDetails['hex']) vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('2200000.00')) bal = self.nodes[0].getbalance() inputs = [{ "txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "amount": vout['value'], }] outputs = {self.nodes[0].getnewaddress(): 2190000} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet( rawTx, inputs) # node1 only has one key, can't comp. 
sign the tx assert_equal(rawTxPartialSigned['complete'], False) rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs) # node2 can sign the tx compl., own two of three keys assert_equal(rawTxSigned['complete'], True) self.nodes[2].sendrawtransaction(rawTxSigned['hex']) rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex']) self.sync_all() self.generate(self.nodes[0], 1) self.sync_all() assert_equal(self.nodes[0].getbalance(), bal + Decimal( '50000000.00') + Decimal('2190000.00')) # block reward + tx rawTxBlock = self.nodes[0].getblock( self.nodes[0].getbestblockhash()) # 2of2 test for combining transactions bal = self.nodes[2].getbalance() addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) self.nodes[1].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] mSigObjValid = self.nodes[2].getaddressinfo(mSigObj) txId = self.nodes[0].sendtoaddress(mSigObj, 2200000) decTx = self.nodes[0].gettransaction(txId) rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex']) self.sync_all() self.generate(self.nodes[0], 1) self.sync_all() # the funds of a 2of2 multisig tx should not be marked as spendable assert_equal(self.nodes[2].getbalance(), bal) txDetails = self.nodes[0].gettransaction(txId, True) rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex']) vout = next(o for o in rawTx2['vout'] if o['value'] == Decimal('2200000.00')) bal = self.nodes[0].getbalance() inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "redeemScript": mSigObjValid['hex'], "amount": vout['value']}] outputs = {self.nodes[0].getnewaddress(): 2190000} rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs) rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet( rawTx2, inputs) self.log.debug(rawTxPartialSigned1) # node1 only has one key, can't comp. sign the tx assert_equal(rawTxPartialSigned1['complete'], False) rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet( rawTx2, inputs) self.log.debug(rawTxPartialSigned2) # node2 only has one key, can't comp. 
sign the tx assert_equal(rawTxPartialSigned2['complete'], False) rawTxComb = self.nodes[2].combinerawtransaction( [rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']]) self.log.debug(rawTxComb) self.nodes[2].sendrawtransaction(rawTxComb) rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb) self.sync_all() self.generate(self.nodes[0], 1) self.sync_all() # block reward + tx assert_equal(self.nodes[0].getbalance(), bal + Decimal('50000000.00') + Decimal('2190000.00')) # Sanity checks on verbose getrawtransaction output txId = rawTx["txid"] rawTxOutput = self.nodes[0].getrawtransaction(txId, True) assert_equal(rawTxOutput["hex"], rawTxSigned["hex"]) assert_equal(rawTxOutput["txid"], txId) assert_equal(rawTxOutput["hash"], txId) assert_greater_than(rawTxOutput["size"], 300) assert_equal(rawTxOutput["version"], 0x02) assert_equal(rawTxOutput["locktime"], 0) assert_equal(len(rawTxOutput["vin"]), 1) assert_equal(len(rawTxOutput["vout"]), 1) assert_equal(rawTxOutput["blockhash"], rawTxBlock["hash"]) assert_equal(rawTxOutput["confirmations"], 3) assert_equal(rawTxOutput["time"], rawTxBlock["time"]) assert_equal(rawTxOutput["blocktime"], rawTxBlock["time"]) # Basic signrawtransaction test addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 10_000_000) self.generate(self.nodes[0], 1) self.sync_all() vout = find_vout_for_address(self.nodes[1], txid, addr) rawTx = self.nodes[1].createrawtransaction( [{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): 9_999_000}) rawTxSigned = self.nodes[1].signrawtransactionwithwallet(rawTx) txId = self.nodes[1].sendrawtransaction(rawTxSigned['hex']) self.generate(self.nodes[0], 1) self.sync_all() # getrawtransaction tests # 1. valid parameters - only supply txid assert_equal( self.nodes[0].getrawtransaction(txId), rawTxSigned['hex']) # 2. valid parameters - supply txid and 0 for non-verbose assert_equal( self.nodes[0].getrawtransaction(txId, 0), rawTxSigned['hex']) # 3. valid parameters - supply txid and False for non-verbose assert_equal(self.nodes[0].getrawtransaction(txId, False), rawTxSigned['hex']) # 4. valid parameters - supply txid and 1 for verbose. # We only check the "hex" field of the output so we don't need to # update this test every time the output format changes. assert_equal(self.nodes[0].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex']) # 5. valid parameters - supply txid and True for non-verbose assert_equal(self.nodes[0].getrawtransaction(txId, True)["hex"], rawTxSigned['hex']) # 6. invalid parameters - supply txid and string "Flase" assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, "Flase") # 7. invalid parameters - supply txid and empty array assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txId, []) # 8. 
invalid parameters - supply txid and empty dict assert_raises_rpc_error( -1, "not a boolean", self.nodes[0].getrawtransaction, txId, {}) inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'sequence': 1000}] outputs = {self.nodes[0].getnewaddress(): 1} assert_raises_rpc_error( -8, 'Invalid parameter, missing vout key', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['vout'] = "1" assert_raises_rpc_error( -8, 'Invalid parameter, vout must be a number', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['vout'] = -1 assert_raises_rpc_error( -8, 'Invalid parameter, vout cannot be negative', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['vout'] = 1 rawtx = self.nodes[0].createrawtransaction(inputs, outputs) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 1000) # 9. invalid parameters - sequence number out of range inputs[0]['sequence'] = -1 assert_raises_rpc_error( -8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) # 10. invalid parameters - sequence number out of range inputs[0]['sequence'] = 4294967296 assert_raises_rpc_error( -8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['sequence'] = 4294967294 rawtx = self.nodes[0].createrawtransaction(inputs, outputs) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 4294967294) #################################### # TRANSACTION VERSION NUMBER TESTS # #################################### # Test the minimum transaction version number that fits in a signed # 32-bit integer. # As transaction version is unsigned, this should convert to its # unsigned equivalent. tx = CTransaction() tx.nVersion = -0x80000000 rawtx = ToHex(tx) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['version'], 0x80000000) # Test the maximum transaction version number that fits in a signed # 32-bit integer. tx = CTransaction() tx.nVersion = 0x7fffffff rawtx = ToHex(tx) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['version'], 0x7fffffff) self.log.info('sendrawtransaction/testmempoolaccept with maxfeerate') # Test a transaction with a small fee. txId = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), 1000000) rawTx = self.nodes[0].getrawtransaction(txId, True) vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1000000.00')) self.sync_all() inputs = [{"txid": txId, "vout": vout['n']}] # Fee 10,000 satoshis, (1,000,000 - (10000 sat * 0.01 XEC/sat)) = # 999900 outputs = {self.nodes[0].getnewaddress(): Decimal("999900.00")} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx) assert_equal(rawTxSigned['complete'], True) # Fee 10,000 satoshis, ~200 b transaction, fee rate should land around 50 sat/byte = 500 XEC/kB # Thus, testmempoolaccept should reject testres = self.nodes[2].testmempoolaccept( [rawTxSigned['hex']], 500.00)[0] assert_equal(testres['allowed'], False) assert_equal(testres['reject-reason'], 'max-fee-exceeded') # and sendrawtransaction should throw assert_raises_rpc_error(-25, 'Fee exceeds maximum configured by user (e.g. 
-maxtxfee, maxfeerate)', self.nodes[2].sendrawtransaction, rawTxSigned['hex'], 10.00) # and the following calls should both succeed testres = self.nodes[2].testmempoolaccept( rawtxs=[rawTxSigned['hex']])[0] assert_equal(testres['allowed'], True) self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex']) # Test a transaction with a large fee. txId = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), 1000000) rawTx = self.nodes[0].getrawtransaction(txId, True) vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1000000.00')) self.sync_all() inputs = [{"txid": txId, "vout": vout['n']}] # Fee 2,000,000 satoshis, (1,000,000 - (2,000,000 sat * 0.01 XEC/sat)) = # 980000 outputs = {self.nodes[0].getnewaddress(): Decimal("980000.00")} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx) assert_equal(rawTxSigned['complete'], True) # Fee 2,000,000 satoshis, ~100 b transaction, fee rate should land around 20,000 sat/byte = 200,000 XEC/kB # Thus, testmempoolaccept should reject testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']])[0] assert_equal(testres['allowed'], False) assert_equal(testres['reject-reason'], 'max-fee-exceeded') # and sendrawtransaction should throw assert_raises_rpc_error(-25, 'Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)', self.nodes[2].sendrawtransaction, rawTxSigned['hex']) # and the following calls should both succeed testres = self.nodes[2].testmempoolaccept( rawtxs=[rawTxSigned['hex']], maxfeerate='200000.00')[0] assert_equal(testres['allowed'], True) self.nodes[2].sendrawtransaction( hexstring=rawTxSigned['hex'], maxfeerate='200000.00') self.log.info( 'sendrawtransaction/testmempoolaccept with tx that is already in the chain') self.generate(self.nodes[2], 1) for node in self.nodes: testres = node.testmempoolaccept([rawTxSigned['hex']])[0] assert_equal(testres['allowed'], False) assert_equal(testres['reject-reason'], 'txn-already-known') assert_raises_rpc_error( -27, 'Transaction already in block chain', node.sendrawtransaction, rawTxSigned['hex']) ########################################## # Decoding weird scripts in transactions # ########################################## self.log.info('Decode correctly-formatted but weird transactions') tx = CTransaction() # empty self.nodes[0].decoderawtransaction(ToHex(tx)) # truncated push tx.vin.append(CTxIn(COutPoint(42, 0), b'\x4e\x00\x00')) tx.vin.append(CTxIn(COutPoint(42, 0), b'\x4c\x10TRUNC')) tx.vout.append(CTxOut(0, b'\x4e\x00\x00')) tx.vout.append(CTxOut(0, b'\x4c\x10TRUNC')) self.nodes[0].decoderawtransaction(ToHex(tx)) # giant pushes and long scripts tx.vin.append( CTxIn(COutPoint(42, 0), CScript([b'giant push' * 10000]))) tx.vout.append(CTxOut(0, CScript([b'giant push' * 10000]))) self.nodes[0].decoderawtransaction(ToHex(tx)) self.log.info('Refuse garbage after transaction') assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, f"{ToHex(tx)}00") if __name__ == '__main__': RawTransactionsTest().main() diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 7290833a0..01b403f43 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -1,900 +1,901 @@ #!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file 
COPYING or http://www.opensource.org/licenses/mit-license.php. """Test objects for interacting with a bitcoind node over the p2p protocol. The P2PInterface objects interact with the bitcoind nodes under test using the node's p2p interface. They can be used to send messages to the node, and callbacks can be registered that execute when messages are received from the node. Messages are sent to/received from the node on an asyncio event loop. State held inside the objects must be guarded by the p2p_lock to avoid data races between the main testing thread and the event loop. P2PConnection: A low-level connection object to a node's P2P interface P2PInterface: A high-level interface object for communicating to a node over P2P P2PDataStore: A p2p interface class that keeps a store of transactions and blocks and can respond correctly to getdata and getheaders messages P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps a count of how many times each txid has been announced.""" import asyncio import logging import struct import sys import threading from collections import defaultdict from io import BytesIO from test_framework.messages import ( MAX_HEADERS_RESULTS, MSG_BLOCK, MSG_TX, MSG_TYPE_MASK, NODE_NETWORK, CBlockHeader, msg_addr, msg_addrv2, msg_avahello, msg_avapoll, msg_avaproof, msg_avaproofs, msg_avaproofsreq, msg_block, msg_blocktxn, msg_cfcheckpt, msg_cfheaders, msg_cfilter, msg_cmpctblock, msg_feefilter, msg_filteradd, msg_filterclear, msg_filterload, msg_getaddr, msg_getavaaddr, msg_getavaproofs, msg_getblocks, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_mempool, msg_merkleblock, msg_notfound, msg_ping, msg_pong, msg_sendaddrv2, msg_sendcmpct, msg_sendheaders, msg_tcpavaresponse, msg_tx, msg_verack, msg_version, sha256, ) from test_framework.util import MAX_NODES, p2p_port, wait_until_helper logger = logging.getLogger("TestFramework.p2p") # The minimum P2P version that this test framework supports MIN_P2P_VERSION_SUPPORTED = 60001 # The P2P version that this test framework implements and sends in its `version` # message. 
Past bip-31 for ping/pong P2P_VERSION = 70014 # The services that this test framework offers in its `version` message P2P_SERVICES = NODE_NETWORK # The P2P user agent string that this test framework sends in its `version` # message P2P_SUBVERSION = "/python-p2p-tester:0.0.3/" # Value for relay that this test framework sends in its `version` message P2P_VERSION_RELAY = 1 MESSAGEMAP = { b"addr": msg_addr, b"addrv2": msg_addrv2, b"avapoll": msg_avapoll, b"avaproof": msg_avaproof, b"avaproofs": msg_avaproofs, b"avaproofsreq": msg_avaproofsreq, b"avaresponse": msg_tcpavaresponse, b"avahello": msg_avahello, b"block": msg_block, b"blocktxn": msg_blocktxn, b"cfcheckpt": msg_cfcheckpt, b"cfheaders": msg_cfheaders, b"cfilter": msg_cfilter, b"cmpctblock": msg_cmpctblock, b"feefilter": msg_feefilter, b"filteradd": msg_filteradd, b"filterclear": msg_filterclear, b"filterload": msg_filterload, b"getaddr": msg_getaddr, b"getavaaddr": msg_getavaaddr, b"getavaproofs": msg_getavaproofs, b"getblocks": msg_getblocks, b"getblocktxn": msg_getblocktxn, b"getdata": msg_getdata, b"getheaders": msg_getheaders, b"headers": msg_headers, b"inv": msg_inv, b"mempool": msg_mempool, b"merkleblock": msg_merkleblock, b"notfound": msg_notfound, b"ping": msg_ping, b"pong": msg_pong, b"sendaddrv2": msg_sendaddrv2, b"sendcmpct": msg_sendcmpct, b"sendheaders": msg_sendheaders, b"tx": msg_tx, b"verack": msg_verack, b"version": msg_version, } MAGIC_BYTES = { "mainnet": b"\xe3\xe1\xf3\xe8", "testnet3": b"\xf4\xe5\xf3\xf4", "regtest": b"\xda\xb5\xbf\xfa", } class P2PConnection(asyncio.Protocol): """A low-level connection object to a node's P2P interface. This class is responsible for: - opening and closing the TCP connection to the node - reading bytes from and writing bytes to the socket - deserializing and serializing the P2P message header - logging messages as they are sent and received This class contains no logic for handing the P2P message payloads. It must be sub-classed and the on_message() callback overridden.""" def __init__(self): # The underlying transport of the connection. # Should only call methods on this from the NetworkThread, c.f. # call_soon_threadsafe self._transport = None @property def is_connected(self): return self._transport is not None def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor): assert not self.is_connected self.timeout_factor = timeout_factor self.dstaddr = dstaddr self.dstport = dstport # The initial message to send after the connection was made: self.on_connection_send_msg = None self.on_connection_send_msg_is_raw = False self.recvbuf = b"" self.magic_bytes = MAGIC_BYTES[net] def peer_connect(self, dstaddr, dstport, *, net, timeout_factor): self.peer_connect_helper(dstaddr, dstport, net, timeout_factor) loop = NetworkThread.network_event_loop logger.debug( f'Connecting to Bitcoin ABC Node: {self.dstaddr}:{self.dstport}') coroutine = loop.create_connection( lambda: self, host=self.dstaddr, port=self.dstport) return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine) def peer_accept_connection( self, connect_id, connect_cb=lambda: None, *, net, timeout_factor): self.peer_connect_helper('0', 0, net, timeout_factor) logger.debug( f'Listening for Bitcoin ABC Node with id: {connect_id}') return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id) def peer_disconnect(self): # Connection could have already been closed by other end. 
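# Aside: a minimal framing sketch (illustrative only, not part of p2p.py) of the
# wire layout that the P2PConnection header handling described above works with:
# 4-byte network magic (see MAGIC_BYTES), a 12-byte null-padded command, a 4-byte
# little-endian payload length, and a checksum made of the first four bytes of
# double-SHA256 of the payload.
import hashlib
import struct


def frame_p2p_message(magic: bytes, command: bytes, payload: bytes) -> bytes:
    """Frame a payload the way a Bitcoin P2P message appears on the wire."""
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    header = (magic + command.ljust(12, b"\x00")
              + struct.pack("<I", len(payload)) + checksum)
    return header + payload


# Example: an empty "verack" on regtest would be framed as
# frame_p2p_message(MAGIC_BYTES["regtest"], b"verack", b"")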
NetworkThread.network_event_loop.call_soon_threadsafe( lambda: self._transport and self._transport.abort()) # Connection and disconnection methods def connection_made(self, transport): """asyncio callback when a connection is opened.""" assert not self._transport logger.debug(f"Connected & Listening: {self.dstaddr}:{self.dstport}") self._transport = transport if self.on_connection_send_msg: if self.on_connection_send_msg_is_raw: self.send_raw_message(self.on_connection_send_msg) else: self.send_message(self.on_connection_send_msg) # Never used again self.on_connection_send_msg = None self.on_open() def connection_lost(self, exc): """asyncio callback when a connection is closed.""" if exc: logger.warning( f"Connection lost to {self.dstaddr}:{self.dstport} due to {exc}") else: logger.debug(f"Closed connection to: {self.dstaddr}:{self.dstport}") self._transport = None self.recvbuf = b"" self.on_close() # Socket read methods def data_received(self, t): """asyncio callback when data is read from the socket.""" with p2p_lock: if len(t) > 0: self.recvbuf += t while True: msg = self._on_data() if msg is None: break self.on_message(msg) def _on_data(self): """Try to read P2P messages from the recv buffer. This method reads data from the buffer in a loop. It deserializes, parses and verifies the P2P header, then passes the P2P payload to the on_message callback for processing.""" try: with p2p_lock: if len(self.recvbuf) < 4: return None if self.recvbuf[:4] != self.magic_bytes: raise ValueError( f"magic bytes mismatch: " f"{self.magic_bytes!r} != {self.recvbuf!r}") if len(self.recvbuf) < 4 + 12 + 4 + 4: return None msgtype = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0] msglen = struct.unpack( " 500: log_message += "... (msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): """A high-level P2P interface class for communicating with a Bitcoin Cash node. This class provides high-level callbacks for processing P2P message payloads, as well as convenience methods for interacting with the node over P2P. Individual testcases should subclass this and override the on_* methods if they want to alter message handling behaviour.""" def __init__(self, support_addrv2=False): super().__init__() # Track number of messages of each type received. # Should be read-only in a test. self.message_count = defaultdict(int) # Track the most recent message of each type. # To wait for a message to be received, pop that message from # this and use self.wait_until. 
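# For instance (hypothetical test-side usage, assuming `peer` is a connected
# P2PInterface), the pop-then-wait pattern looks like:
#
#     with p2p_lock:
#         peer.last_message.pop("pong", None)
#     peer.send_message(msg_ping(nonce=42))
#     peer.wait_until(lambda: "pong" in peer.last_message)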
self.last_message = {} # A count of the number of ping messages we've sent to the node self.ping_counter = 1 # The network services received from the peer self.nServices = 0 self.support_addrv2 = support_addrv2 def peer_connect_send_version(self, services): # Send a version msg vt = msg_version() vt.nVersion = P2P_VERSION vt.strSubVer = P2P_SUBVERSION vt.relay = P2P_VERSION_RELAY vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 # Will be sent in connection_made callback self.on_connection_send_msg = vt def peer_connect(self, *args, services=P2P_SERVICES, send_version=True, **kwargs): create_conn = super().peer_connect(*args, **kwargs) if send_version: self.peer_connect_send_version(services) return create_conn def peer_accept_connection(self, *args, services=NODE_NETWORK, **kwargs): create_conn = super().peer_accept_connection(*args, **kwargs) self.peer_connect_send_version(services) return create_conn # Message receiving methods def on_message(self, message): """Receive message and dispatch message to appropriate callback. We keep a count of how many of each message type has been received and the most recent message of each type.""" with p2p_lock: try: msgtype = message.msgtype.decode('ascii') self.message_count[msgtype] += 1 self.last_message[msgtype] = message getattr(self, f"on_{msgtype}")(message) except Exception: print(f"ERROR delivering {repr(message)} ({sys.exc_info()[0]})") raise # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour. def on_open(self): pass def on_close(self): pass def on_addr(self, message): pass def on_addrv2(self, message): pass def on_avapoll(self, message): pass def on_avaproof(self, message): pass def on_avaproofs(self, message): pass def on_avaproofsreq(self, message): pass def on_avaresponse(self, message): pass def on_avahello(self, message): pass def on_block(self, message): pass def on_blocktxn(self, message): pass def on_cfcheckpt(self, message): pass def on_cfheaders(self, message): pass def on_cfilter(self, message): pass def on_cmpctblock(self, message): pass def on_feefilter(self, message): pass def on_filteradd(self, message): pass def on_filterclear(self, message): pass def on_filterload(self, message): pass def on_getaddr(self, message): pass def on_getavaaddr(self, message): pass def on_getavaproofs(self, message): pass def on_getblocks(self, message): pass def on_getblocktxn(self, message): pass def on_getdata(self, message): pass def on_getheaders(self, message): pass def on_headers(self, message): pass def on_mempool(self, message): pass def on_merkleblock(self, message): pass def on_notfound(self, message): pass def on_pong(self, message): pass def on_sendaddrv2(self, message): pass def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass def on_inv(self, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): self.send_message(want) def on_ping(self, message): self.send_message(msg_pong(message.nonce)) def on_verack(self, message): pass def on_version(self, message): assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, \ f"Version {message.nVersion} received. 
Test framework only supports " \ f"versions greater than {MIN_P2P_VERSION_SUPPORTED}" self.send_message(msg_verack()) if self.support_addrv2: self.send_message(msg_sendaddrv2()) self.nServices = message.nServices self.send_message(msg_getaddr()) # Connection helper methods def wait_until(self, test_function_in, *, timeout=60, check_connected=True): def test_function(): if check_connected: assert self.is_connected return test_function_in() wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor) def wait_for_connect(self, timeout=60): def test_function(): return self.is_connected self.wait_until(test_function, timeout=timeout, check_connected=False) def wait_for_disconnect(self, timeout=60): def test_function(): return not self.is_connected self.wait_until(test_function, timeout=timeout, check_connected=False) # Message receiving helper methods def wait_for_tx(self, txid, timeout=60): def test_function(): if not self.last_message.get('tx'): return False return self.last_message['tx'].tx.rehash() == txid self.wait_until(test_function, timeout=timeout) def wait_for_block(self, blockhash, timeout=60): def test_function(): return self.last_message.get( "block") and self.last_message["block"].block.rehash() == blockhash self.wait_until(test_function, timeout=timeout) def wait_for_header(self, blockhash, timeout=60): def test_function(): last_headers = self.last_message.get('headers') if not last_headers: return False return last_headers.headers[0].rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_merkleblock(self, blockhash, timeout=60): def test_function(): last_filtered_block = self.last_message.get('merkleblock') if not last_filtered_block: return False return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_getdata(self, hash_list, timeout=60): """Waits for a getdata message. The object hashes in the inventory vector must match the provided hash_list.""" def test_function(): last_data = self.last_message.get("getdata") if not last_data: return False return [x.hash for x in last_data.inv] == hash_list self.wait_until(test_function, timeout=timeout) def wait_for_getheaders(self, timeout=60): """Waits for a getheaders message. Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"] value must be explicitly cleared before calling this method, or this will return immediately with success. 
TODO: change this method to take a hash value and only return true if the correct block header has been requested.""" def test_function(): return self.last_message.get("getheaders") self.wait_until(test_function, timeout=timeout) def wait_for_inv(self, expected_inv, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError( "wait_for_inv() will only verify the first inv object") def test_function(): return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash self.wait_until(test_function, timeout=timeout) def wait_for_verack(self, timeout=60): def test_function(): return "verack" in self.last_message self.wait_until(test_function, timeout=timeout) # Message sending helper functions def send_and_ping(self, message, timeout=60): self.send_message(message) self.sync_with_ping(timeout=timeout) def sync_send_with_ping(self, timeout=60): """Ensure SendMessages is called on this connection""" # Calling sync_with_ping twice requires that the node calls # `ProcessMessage` twice, and thus ensures `SendMessages` must have # been called at least once self.sync_with_ping() self.sync_with_ping() def sync_with_ping(self, timeout=60): """Ensure ProcessMessages is called on this connection""" self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): return self.last_message.get( "pong") and self.last_message["pong"].nonce == self.ping_counter self.wait_until(test_function, timeout=timeout) self.ping_counter += 1 # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, # P2PConnection acquires this lock whenever delivering a message to a P2PInterface. # This lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. p2p_lock = threading.Lock() class NetworkThread(threading.Thread): network_event_loop = None def __init__(self): super().__init__(name="NetworkThread") # There is only one event loop and no more than one thread must be # created assert not self.network_event_loop NetworkThread.listeners = {} NetworkThread.protos = {} NetworkThread.network_event_loop = asyncio.new_event_loop() def run(self): """Start the network thread.""" self.network_event_loop.run_forever() def close(self, timeout=10): """Close the connections and network event loop.""" self.network_event_loop.call_soon_threadsafe( self.network_event_loop.stop) wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout) self.network_event_loop.close() self.join(timeout) # Safe to remove event loop. NetworkThread.network_event_loop = None @classmethod def listen(cls, p2p, callback, port=None, addr=None, idx=1): """ Ensure a listening server is running on the given port, and run the protocol specified by `p2p` on the next connection to it. 
Once ready for connections, call `callback`.""" if port is None: assert 0 < idx <= MAX_NODES port = p2p_port(MAX_NODES - idx) if addr is None: addr = '127.0.0.1' coroutine = cls.create_listen_server(addr, port, callback, p2p) cls.network_event_loop.call_soon_threadsafe( cls.network_event_loop.create_task, coroutine) @classmethod async def create_listen_server(cls, addr, port, callback, proto): def peer_protocol(): """Returns a function that does the protocol handling for a new connection. To allow different connections to have different behaviors, the protocol function is first put in the cls.protos dict. When the connection is made, the function removes the protocol function from that dict, and returns it so the event loop can start executing it.""" response = cls.protos.get((addr, port)) cls.protos[(addr, port)] = None return response if (addr, port) not in cls.listeners: # When creating a listener on a given (addr, port) we only need to # do it once. If we want different behaviors for different # connections, we can accomplish this by providing different # `proto` functions listener = await cls.network_event_loop.create_server(peer_protocol, addr, port) logger.debug( f"Listening server on {addr}:{port} should be started") cls.listeners[(addr, port)] = listener cls.protos[(addr, port)] = proto callback(addr, port) class P2PDataStore(P2PInterface): """A P2P data store class. Keeps a block and transaction store and responds correctly to getdata and getheaders requests.""" def __init__(self): super().__init__() # store of blocks. key is block hash, value is a CBlock object self.block_store = {} self.last_block_hash = '' # store of txs. key is txid, value is a CTransaction object self.tx_store = {} self.getdata_requests = [] def on_getdata(self, message): """Check for the tx/block in our stores and if found, reply with an inv message.""" for inv in message.inv: self.getdata_requests.append(inv.hash) if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys(): self.send_message(msg_tx(self.tx_store[inv.hash])) elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys(): self.send_message(msg_block(self.block_store[inv.hash])) else: logger.debug( f'getdata message type {hex(inv.type)} received.') def on_getheaders(self, message): """Search back through our block store for the locator, and reply with a headers message if found.""" locator, hash_stop = message.locator, message.hashstop # Assume that the most recent block added is the tip if not self.block_store: return headers_list = [self.block_store[self.last_block_hash]] while headers_list[-1].sha256 not in locator.vHave: # Walk back through the block store, adding headers to headers_list # as we go. prev_block_hash = headers_list[-1].hashPrevBlock if prev_block_hash in self.block_store: prev_block_header = CBlockHeader( self.block_store[prev_block_hash]) headers_list.append(prev_block_header) if prev_block_header.sha256 == hash_stop: # if this is the hashstop header, stop here break else: logger.debug( f'block hash {hex(prev_block_hash)} not found in block store') break # Truncate the list if there are too many headers headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1] response = msg_headers(headers_list) if response is not None: self.send_message(response) def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60): """Send blocks to test node and test whether the tip advances. 
- add all blocks to our block_store - send a headers message for the final block - the on_getheaders handler will ensure that any getheaders are responded to - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will ensure that any getdata messages are responded to. Otherwise send the full block unsolicited. - if success is True: assert that the node's tip advances to the most recent block - if success is False: assert that the node's tip doesn't advance - if reject_reason is set: assert that the correct reject message is logged""" with p2p_lock: for block in blocks: self.block_store[block.sha256] = block self.last_block_hash = block.sha256 def test(): if force_send: for b in blocks: self.send_message(msg_block(block=b)) else: self.send_message( msg_headers([CBlockHeader(block) for block in blocks])) self.wait_until( lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, check_connected=success, ) if expect_disconnect: self.wait_for_disconnect(timeout=timeout) else: self.sync_with_ping(timeout=timeout) if success: self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout) else: assert node.getbestblockhash() != blocks[-1].hash if reject_reason: with node.assert_debug_log(expected_msgs=[reject_reason]): test() else: test() def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): """Send txs to test node and test whether they're accepted to the mempool. - add all txs to our tx_store - send tx messages for all txs - if success is True/False: assert that the txs are/are not accepted to the mempool - if expect_disconnect is True: Skip the sync with ping - if reject_reason is set: assert that the correct reject message is logged.""" with p2p_lock: for tx in txs: self.tx_store[tx.sha256] = tx def test(): for tx in txs: self.send_message(msg_tx(tx)) if expect_disconnect: self.wait_for_disconnect() else: self.sync_with_ping() raw_mempool = node.getrawmempool() if success: # Check that all txs are now in the mempool for tx in txs: assert tx.hash in raw_mempool, f"{tx.hash} not found in mempool" else: # Check that none of the txs are now in the mempool for tx in txs: assert tx.hash not in raw_mempool, f"{tx.hash} tx found in mempool" if reject_reason: with node.assert_debug_log(expected_msgs=[reject_reason]): test() else: test() class P2PTxInvStore(P2PInterface): """A P2PInterface which stores a count of how many times each txid has been announced.""" def __init__(self): super().__init__() self.tx_invs_received = defaultdict(int) def on_inv(self, message): # Send getdata in response. super().on_inv(message) # Store how many times invs have been received for each tx. for i in message.inv: if i.type == MSG_TX: # save txid self.tx_invs_received[i.hash] += 1 def get_invs(self): with p2p_lock: return list(self.tx_invs_received.keys()) def wait_for_broadcast(self, txns, timeout=60): """Waits for the txns (list of txids) to complete initial broadcast. The mempool should mark unbroadcast=False for these transactions. """ # Wait until invs have been received (and getdatas sent) for each txid. 
- self.wait_until(lambda: set(self.tx_invs_received.keys()) == set( - [int(tx, 16) for tx in txns]), timeout=timeout) + self.wait_until( + lambda: set(self.tx_invs_received.keys()) == {int(tx, 16) for tx in txns}, + timeout=timeout) # Flush messages and wait for the getdatas to be processed self.sync_with_ping() diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 54029c93e..5b82f3fe4 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -1,944 +1,944 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Base class for RPC testing.""" import argparse import configparser import logging import os import pdb import platform import random import shutil import subprocess import sys import tempfile import time from enum import Enum from typing import List from . import coverage from .address import ADDRESS_ECREG_P2SH_OP_TRUE from .authproxy import JSONRPCException from .avatools import get_proof_ids from .p2p import NetworkThread from .test_node import TestNode from .util import ( MAX_NODES, PortSeed, assert_equal, check_json_precision, chronik_port, get_datadir_path, initialize_datadir, p2p_port, rpc_port, uint256_hex, wait_until_helper, ) class TestStatus(Enum): PASSED = 1 FAILED = 2 SKIPPED = 3 TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 # Timestamp is Sep. 20th, 2022 at 12:00:00 TIMESTAMP_IN_THE_PAST = 1663675200 TMPDIR_PREFIX = "bitcoin_func_test_" class SkipTest(Exception): """This exception is raised to skip a test""" def __init__(self, message): self.message = message class BitcoinTestMetaClass(type): """Metaclass for BitcoinTestFramework. Ensures that any attempt to register a subclass of `BitcoinTestFramework` adheres to a standard whereby the subclass overrides `set_test_params` and `run_test` but DOES NOT override either `__init__` or `main`. If any of those standards are violated, a ``TypeError`` is raised.""" def __new__(cls, clsname, bases, dct): if not clsname == 'BitcoinTestFramework': if not ('run_test' in dct and 'set_test_params' in dct): raise TypeError("BitcoinTestFramework subclasses must override " "'run_test' and 'set_test_params'") if '__init__' in dct or 'main' in dct: raise TypeError("BitcoinTestFramework subclasses may not override " "'__init__' or 'main'") return super().__new__(cls, clsname, bases, dct) class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): """Base class for a bitcoin test script. Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods. Individual tests can also override the following methods to customize the test setup: - add_options() - setup_chain() - setup_network() - setup_nodes() The __init__() and main() methods should not be overridden. This class also contains various public and private helper methods.""" def __init__(self): """Sets test framework defaults. Do not override this method. 
Instead, override the set_test_params() method""" self.chain: str = 'regtest' self.setup_clean_chain: bool = False self.nodes: List[TestNode] = [] self.network_thread = None # Wait for up to 60 seconds for the RPC server to respond self.rpc_timeout = 60 self.supports_cli = True self.bind_to_localhost_only = True self.parse_args() self.default_wallet_name = "" self.wallet_data_filename = "wallet.dat" # Optional list of wallet names that can be set in set_test_params to # create and import keys to. If unset, default is len(nodes) * # [default_wallet_name]. If wallet names are None, wallet creation is # skipped. If list is truncated, wallet creation is skipped and keys # are not imported. self.wallet_names = None # Disable ThreadOpenConnections by default, so that adding entries to # addrman will not result in automatic connections to them. self.disable_autoconnect = True self.set_test_params() if self.options.timeout_factor == 0: self.options.timeout_factor = 99999 # optionally, increase timeout by a factor self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) def main(self): """Main function. This should not be overridden by the subclass test scripts.""" assert hasattr( self, "num_nodes"), "Test must set self.num_nodes in set_test_params()" try: self.setup() self.run_test() except JSONRPCException: self.log.exception("JSONRPC error") self.success = TestStatus.FAILED except SkipTest as e: self.log.warning(f"Test Skipped: {e.message}") self.success = TestStatus.SKIPPED except AssertionError: self.log.exception("Assertion failed") self.success = TestStatus.FAILED except KeyError: self.log.exception("Key error") self.success = TestStatus.FAILED except subprocess.CalledProcessError as e: self.log.exception(f"Called Process failed with '{e.output}'") self.success = TestStatus.FAILED except Exception: self.log.exception("Unexpected exception caught during testing") self.success = TestStatus.FAILED except KeyboardInterrupt: self.log.warning("Exiting after keyboard interrupt") self.success = TestStatus.FAILED finally: exit_code = self.shutdown() sys.exit(exit_code) def parse_args(self): parser = argparse.ArgumentParser(usage="%(prog)s [options]") parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop bitcoinds after the test execution") parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(f"{os.path.dirname(os.path.realpath(__file__))}/../../cache"), help="Directory for caching pregenerated datadirs (default: %(default)s)") parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs") parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int, help="The seed to use for assigning port numbers (default: current process id)") parser.add_argument("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") parser.add_argument("--configfile", dest="configfile", default=os.path.abspath(os.path.dirname(os.path.realpath( __file__)) + "/../../config.ini"), help="Location of the test framework config file (default: %(default)s)") parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", help="Attach a python debugger if test fails") parser.add_argument("--usecli", dest="usecli", default=False, action="store_true", help="use bitcoin-cli instead of RPC for all commands") parser.add_argument("--perf", dest="perf", default=False, action="store_true", help="profile running nodes with perf for the duration of the test") parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true", help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required") parser.add_argument("--randomseed", type=int, help="set a random seed for deterministically reproducing a previous test run") parser.add_argument("--descriptors", default=False, action="store_true", help="Run test using a descriptor wallet") parser.add_argument("--with-wellingtonactivation", dest="wellingtonactivation", default=False, action="store_true", help=f"Activate wellington update on timestamp {TIMESTAMP_IN_THE_PAST}") parser.add_argument( '--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. ' 'Setting it to 0 disables all timeouts') self.add_options(parser) self.options = parser.parse_args() def setup(self): """Call this method to start up the test framework object with options set.""" PortSeed.n = self.options.port_seed check_json_precision() self.options.cachedir = os.path.abspath(self.options.cachedir) config = configparser.ConfigParser() config.read_file(open(self.options.configfile, encoding='utf-8')) self.config = config fname_bitcoind = os.path.join( config["environment"]["BUILDDIR"], "src", f"bitcoind{config['environment']['EXEEXT']}" ) fname_bitcoincli = os.path.join( config["environment"]["BUILDDIR"], "src", f"bitcoin-cli{config['environment']['EXEEXT']}" ) self.options.bitcoind = os.getenv("BITCOIND", default=fname_bitcoind) self.options.bitcoincli = os.getenv( "BITCOINCLI", default=fname_bitcoincli) self.options.emulator = config["environment"]["EMULATOR"] or None os.environ['PATH'] = config['environment']['BUILDDIR'] + os.pathsep + \ config['environment']['BUILDDIR'] + os.path.sep + "qt" + os.pathsep + \ os.environ['PATH'] # Add test dir to sys.path (to access generated modules) sys.path.append(os.path.join(config['environment']['BUILDDIR'], "test")) # Set up temp directory and start logging if self.options.tmpdir: self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX) self._start_logging() # Seed the PRNG. Note that test runs are reproducible if and only if # a single thread accesses the PRNG. 
For more information, see # https://docs.python.org/3/library/random.html#notes-on-reproducibility. # The network thread shouldn't access random. If we need to change the # network thread to access randomness, it should instantiate its own # random.Random object. seed = self.options.randomseed if seed is None: seed = random.randrange(sys.maxsize) else: self.log.debug(f"User supplied random seed {seed}") random.seed(seed) self.log.debug(f"PRNG seed is: {seed}") self.log.debug('Setting up network thread') self.network_thread = NetworkThread() self.network_thread.start() if self.options.usecli: if not self.supports_cli: raise SkipTest( "--usecli specified but test does not support using CLI") self.skip_if_no_cli() self.skip_test_if_missing_module() self.setup_chain() self.setup_network() self.success = TestStatus.PASSED def shutdown(self): """Call this method to shut down the test framework object.""" if self.success == TestStatus.FAILED and self.options.pdbonfailure: print("Testcase failed. Attaching python debugger. Enter ? for help") pdb.set_trace() self.log.debug('Closing down network thread') self.network_thread.close() if not self.options.noshutdown: self.log.info("Stopping nodes") if self.nodes: self.stop_nodes() else: for node in self.nodes: node.cleanup_on_exit = False self.log.info( "Note: bitcoinds were not stopped and may still be running") should_clean_up = ( not self.options.nocleanup and not self.options.noshutdown and self.success != TestStatus.FAILED and not self.options.perf ) if should_clean_up: self.log.info(f"Cleaning up {self.options.tmpdir} on exit") cleanup_tree_on_exit = True elif self.options.perf: self.log.warning( f"Not cleaning up dir {self.options.tmpdir} due to perf data") cleanup_tree_on_exit = False else: self.log.warning( f"Not cleaning up dir {self.options.tmpdir}") cleanup_tree_on_exit = False if self.success == TestStatus.PASSED: self.log.info("Tests successful") exit_code = TEST_EXIT_PASSED elif self.success == TestStatus.SKIPPED: self.log.info("Test skipped") exit_code = TEST_EXIT_SKIPPED else: self.log.error( f"Test failed. Test logging available at {self.options.tmpdir}" f"/test_framework.log") self.log.error("") combine_logs_path = os.path.normpath( f'{os.path.dirname(os.path.realpath(__file__))}/../combine_logs.py') self.log.error( f"Hint: Call {combine_logs_path} '{self.options.tmpdir}' to " f"consolidate all logs") self.log.error("") self.log.error( "If this failure happened unexpectedly or intermittently, please" " file a bug and provide a link or upload of the combined log.") self.log.error(self.config['environment']['PACKAGE_BUGREPORT']) self.log.error("") exit_code = TEST_EXIT_FAILED # Logging.shutdown will not remove stream- and filehandlers, so we must # do it explicitly. Handlers are removed so the next test run can apply # different log handler settings. # See: https://docs.python.org/3/library/logging.html#logging.shutdown for h in list(self.log.handlers): h.flush() h.close() self.log.removeHandler(h) rpc_logger = logging.getLogger("BitcoinRPC") for h in list(rpc_logger.handlers): h.flush() rpc_logger.removeHandler(h) if cleanup_tree_on_exit: shutil.rmtree(self.options.tmpdir) self.nodes.clear() return exit_code # Methods to override in subclass test scripts. 
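# A minimal subclass sketch (hypothetical example, not a test that exists in the
# tree) of the override pattern enforced by BitcoinTestMetaClass: a test script
# overrides set_test_params() and run_test(), and never __init__() or main().
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class ExampleTest(BitcoinTestFramework):
    def set_test_params(self):
        # main() asserts that num_nodes is set here
        self.num_nodes = 2
        self.setup_clean_chain = True

    def run_test(self):
        # Mine one block on node 0; generate() syncs all nodes by default
        self.generate(self.nodes[0], 1)
        assert_equal(self.nodes[1].getblockcount(), 1)


if __name__ == '__main__':
    ExampleTest().main()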
def set_test_params(self): """Tests must this method to change default values for number of nodes, topology, etc""" raise NotImplementedError def add_options(self, parser): """Override this method to add command-line options to the test""" pass def skip_test_if_missing_module(self): """Override this method to skip a test if a module is not compiled""" pass def setup_chain(self): """Override this method to customize blockchain setup""" self.log.info(f"Initializing test directory {self.options.tmpdir}") if self.setup_clean_chain: self._initialize_chain_clean() else: self._initialize_chain() def setup_network(self): """Override this method to customize test network topology""" self.setup_nodes() # Connect the nodes as a "chain". This allows us # to split the network between nodes 1 and 2 to get # two halves that can work on competing chains. # # Topology looks like this: # node0 <-- node1 <-- node2 <-- node3 # # If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To # ensure block propagation, all nodes will establish outgoing connections toward node0. # See fPreferredDownload in net_processing. # # If further outbound connections are needed, they can be added at the beginning of the test with e.g. # self.connect_nodes(1, 2) for i in range(self.num_nodes - 1): self.connect_nodes(i + 1, i) self.sync_all() def setup_nodes(self): """Override this method to customize test node setup""" extra_args = [[]] * self.num_nodes if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes() if self.is_wallet_compiled(): self.import_deterministic_coinbase_privkeys() if not self.setup_clean_chain: for n in self.nodes: assert_equal(n.getblockchaininfo()["blocks"], 199) # To ensure that all nodes are out of IBD, the most recent block # must have a timestamp not too old (see IsInitialBlockDownload()). self.log.debug('Generate a block with current time') block_hash = self.generate( self.nodes[0], 1, sync_fun=self.no_op)[0] block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0) for n in self.nodes: n.submitblock(block) chain_info = n.getblockchaininfo() assert_equal(chain_info["blocks"], 200) assert_equal(chain_info["initialblockdownload"], False) def import_deterministic_coinbase_privkeys(self): wallet_names = ( [self.default_wallet_name] * len(self.nodes) if self.wallet_names is None else self.wallet_names ) assert len(wallet_names) <= len(self.nodes) for wallet_name, n in zip(wallet_names, self.nodes): if wallet_name is not None: n.createwallet( wallet_name=wallet_name, descriptors=self.options.descriptors, load_on_startup=True) n.importprivkey( privkey=n.get_deterministic_priv_key().key, label='coinbase') def run_test(self): """Tests must override this method to define test logic""" raise NotImplementedError # Public helper methods. These can be accessed by the subclass test # scripts. def add_nodes(self, num_nodes: int, extra_args=None, *, host=None, binary=None): """Instantiate TestNode objects. 
Should only be called once after the nodes have been specified in set_test_params().""" if self.bind_to_localhost_only: extra_confs = [["bind=127.0.0.1"]] * num_nodes else: extra_confs = [[]] * num_nodes if extra_args is None: extra_args = [[]] * num_nodes if binary is None: binary = [self.options.bitcoind] * num_nodes assert_equal(len(extra_confs), num_nodes) assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): self.nodes.append(TestNode( i, get_datadir_path(self.options.tmpdir, i), chain=self.chain, host=host, rpc_port=rpc_port(i), p2p_port=p2p_port(i), chronik_port=chronik_port(i), timewait=self.rpc_timeout, timeout_factor=self.options.timeout_factor, bitcoind=binary[i], bitcoin_cli=self.options.bitcoincli, coverage_dir=self.options.coveragedir, cwd=self.options.tmpdir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli, emulator=self.options.emulator, start_perf=self.options.perf, use_valgrind=self.options.valgrind, descriptors=self.options.descriptors, )) if self.options.wellingtonactivation: self.nodes[i].extend_default_args( [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"]) def start_node(self, i, *args, **kwargs): """Start a bitcoind""" node = self.nodes[i] node.start(*args, **kwargs) node.wait_for_rpc_connection() if self.options.coveragedir is not None: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) def start_nodes(self, extra_args=None, *args, **kwargs): """Start multiple bitcoinds""" if extra_args is None: extra_args = [None] * self.num_nodes assert_equal(len(extra_args), self.num_nodes) try: for i, node in enumerate(self.nodes): node.start(extra_args[i], *args, **kwargs) for node in self.nodes: node.wait_for_rpc_connection() except BaseException: # If one node failed to start, stop the others self.stop_nodes() raise if self.options.coveragedir is not None: for node in self.nodes: coverage.write_all_rpc_commands( self.options.coveragedir, node.rpc) def stop_node(self, i, expected_stderr='', wait=0): """Stop a bitcoind test node""" self.nodes[i].stop_node(expected_stderr, wait=wait) def stop_nodes(self, wait=0): """Stop multiple bitcoind test nodes""" for node in self.nodes: # Issue RPC to stop nodes node.stop_node(wait=wait, wait_until_stopped=False) for node in self.nodes: # Wait for nodes to stop node.wait_until_stopped() def restart_node(self, i, extra_args=None): """Stop and start a test node""" self.stop_node(i) self.start_node(i, extra_args) def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) def connect_nodes(self, a, b): from_node = self.nodes[a] to_node = self.nodes[b] host = to_node.host if host is None: host = '127.0.0.1' ip_port = f"{host}:{str(to_node.p2p_port)}" from_node.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying # See comments in net_processing: # * Must have a version message before anything else # * Must have a verack message before anything else wait_until_helper( lambda: all(peer['version'] != 0 for peer in from_node.getpeerinfo())) wait_until_helper( lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_node.getpeerinfo())) def disconnect_nodes(self, a, b): from_node = self.nodes[a] to_node = self.nodes[b] def get_peer_ids(): result = [] for peer in from_node.getpeerinfo(): if to_node.name in peer['subver']: result.append(peer['id']) return result peer_ids = get_peer_ids() if not peer_ids: self.log.warning( 
f"disconnect_nodes: {from_node.index} and {to_node.index} were not " "connected") return for peer_id in peer_ids: try: from_node.disconnectnode(nodeid=peer_id) except JSONRPCException as e: # If this node is disconnected between calculating the peer id # and issuing the disconnect, don't worry about it. # This avoids a race condition if we're mass-disconnecting # peers. if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED raise # wait to disconnect wait_until_helper(lambda: not get_peer_ids(), timeout=5) def split_network(self): """ Split the network of four nodes into nodes 0/1 and 2/3. """ self.disconnect_nodes(1, 2) self.sync_all(self.nodes[:2]) self.sync_all(self.nodes[2:]) def join_network(self): """ Join the (previously split) network halves together. """ self.connect_nodes(1, 2) self.sync_all() def no_op(self): pass def generate(self, generator, *args, sync_fun=None, **kwargs): blocks = generator.generate(*args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generateblock(self, generator, *args, sync_fun=None, **kwargs): blocks = generator.generateblock(*args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generatetoaddress(self, generator, *args, sync_fun=None, **kwargs): blocks = generator.generatetoaddress( *args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generatetodescriptor(self, generator, *args, sync_fun=None, **kwargs): blocks = generator.generatetodescriptor( *args, invalid_call=False, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def sync_blocks(self, nodes=None, wait=1, timeout=60): """ Wait until everybody has the same tip. sync_blocks needs to be called with an rpc_connections set that has least one node already synced to the latest, stable tip, otherwise there's a chance it might return before all nodes are stably synced. 
""" rpc_connections = nodes or self.nodes timeout = int(timeout * self.options.timeout_factor) stop_time = time.time() + timeout while time.time() <= stop_time: best_hash = [x.getbestblockhash() for x in rpc_connections] if best_hash.count(best_hash[0]) == len(rpc_connections): return # Check that each peer has at least one connection assert (all([len(x.getpeerinfo()) for x in rpc_connections])) time.sleep(wait) best_hashes = "".join(f"\n {b!r}" for b in best_hash) raise AssertionError(f"Block sync timed out after {timeout}s:{best_hashes}") def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True): """ Wait until everybody has the same transactions in their memory pools """ rpc_connections = nodes or self.nodes timeout = int(timeout * self.options.timeout_factor) stop_time = time.time() + timeout while time.time() <= stop_time: pool = [set(r.getrawmempool()) for r in rpc_connections] if pool.count(pool[0]) == len(rpc_connections): if flush_scheduler: for r in rpc_connections: r.syncwithvalidationinterfacequeue() return # Check that each peer has at least one connection assert (all([len(x.getpeerinfo()) for x in rpc_connections])) time.sleep(wait) pool_str = "".join(f"\n {m!r}" for m in pool) raise AssertionError(f"Mempool sync timed out after {timeout}s:{pool_str}") def sync_proofs(self, nodes=None, wait=1, timeout=60): """ Wait until everybody has the same proofs in their proof pools """ rpc_connections = nodes or self.nodes timeout = int(timeout * self.options.timeout_factor) stop_time = time.time() + timeout def format_ids(id_list): """Convert ProodIDs to hex strings for easier debugging""" - return list(uint256_hex(i) for i in id_list) + return [uint256_hex(i) for i in id_list] while time.time() <= stop_time: nodes_proofs = [ set(format_ids(get_proof_ids(r))) for r in rpc_connections] if nodes_proofs.count(nodes_proofs[0]) == len(rpc_connections): return # Check that each peer has at least one connection assert (all([len(x.getpeerinfo()) for x in rpc_connections])) time.sleep(wait) nodes_proofs_str = "".join(f"\n {m!r}" for m in nodes_proofs) raise AssertionError( f"Proofs sync timed out after {timeout}s:{nodes_proofs_str}") def sync_all(self, nodes=None): self.sync_blocks(nodes) self.sync_mempools(nodes) self.sync_proofs(nodes) def wait_until(self, test_function, timeout=60): return wait_until_helper(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor) # Private helper methods. These should not be accessed by the subclass # test scripts. def _start_logging(self): # Add logger and logging handlers self.log = logging.getLogger('TestFramework') self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler( f"{self.options.tmpdir}/test_framework.log", encoding='utf-8') fh.setLevel(logging.DEBUG) # Create console handler to log messages to stderr. By default this # logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # User can provide log level as a number or string (eg DEBUG). 
loglevel # was caught as a string, so try to convert it to an int ll = int(self.options.loglevel) if self.options.loglevel.isdigit( ) else self.options.loglevel.upper() ch.setLevel(ll) # Format logs the same as bitcoind's debug.log with microprecision (so # log files can be concatenated and sorted) formatter = logging.Formatter( fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S') formatter.converter = time.gmtime fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger self.log.addHandler(fh) self.log.addHandler(ch) if self.options.trace_rpc: rpc_logger = logging.getLogger("BitcoinRPC") rpc_logger.setLevel(logging.DEBUG) rpc_handler = logging.StreamHandler(sys.stdout) rpc_handler.setLevel(logging.DEBUG) rpc_logger.addHandler(rpc_handler) def _initialize_chain(self): """Initialize a pre-mined blockchain for use by the test. Create a cache of a 199-block-long chain Afterward, create num_nodes copies from the cache.""" # Use node 0 to create the cache for all other nodes CACHE_NODE_ID = 0 cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID) assert self.num_nodes <= MAX_NODES if not os.path.isdir(cache_node_dir): self.log.debug( f"Creating cache directory {cache_node_dir}") initialize_datadir( self.options.cachedir, CACHE_NODE_ID, self.chain, self.disable_autoconnect, ) self.nodes.append( TestNode( CACHE_NODE_ID, cache_node_dir, chain=self.chain, extra_conf=["bind=127.0.0.1"], extra_args=['-disablewallet'], host=None, rpc_port=rpc_port(CACHE_NODE_ID), p2p_port=p2p_port(CACHE_NODE_ID), chronik_port=chronik_port(CACHE_NODE_ID), timewait=self.rpc_timeout, timeout_factor=self.options.timeout_factor, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, coverage_dir=None, cwd=self.options.tmpdir, descriptors=self.options.descriptors, emulator=self.options.emulator, )) if self.options.wellingtonactivation: self.nodes[CACHE_NODE_ID].extend_default_args( [f"-wellingtonactivationtime={TIMESTAMP_IN_THE_PAST}"]) self.start_node(CACHE_NODE_ID) cache_node = self.nodes[CACHE_NODE_ID] # Wait for RPC connections to be ready cache_node.wait_for_rpc_connection() # Set a time in the past, so that blocks don't end up in the future cache_node.setmocktime( cache_node.getblockheader( cache_node.getbestblockhash())['time']) # Create a 199-block-long chain; each of the 3 first nodes gets 25 # mature blocks and 25 immature. # The 4th address gets 25 mature and only 24 immature blocks so that # the very last block in the cache does not age too much (have an # old tip age). # This is needed so that we are out of IBD when the test starts, # see the tip age check in IsInitialBlockDownload(). 
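As a quick sanity check of the block layout described in the comment above, here is a small standalone sketch (illustrative only, not part of the patched test_framework.py): addresses are picked round-robin over eight generation rounds, and only the final round mines 24 blocks instead of 25, which is how the cache ends up at exactly 199 blocks with a reasonably fresh tip.

    rounds = [25 if i != 7 else 24 for i in range(8)]
    blocks_per_address = [0] * 4
    for i, n in enumerate(rounds):
        blocks_per_address[i % 4] += n
    assert sum(rounds) == 199
    assert blocks_per_address == [50, 50, 50, 49]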
gen_addresses = [ k.address for k in TestNode.PRIV_KEYS][:3] + [ADDRESS_ECREG_P2SH_OP_TRUE] assert_equal(len(gen_addresses), 4) for i in range(8): self.generatetoaddress( cache_node, nblocks=25 if i != 7 else 24, address=gen_addresses[i % len(gen_addresses)], ) assert_equal(cache_node.getblockchaininfo()["blocks"], 199) # Shut it down, and clean up cache directories: self.stop_nodes() self.nodes = [] def cache_path(*paths): return os.path.join(cache_node_dir, self.chain, *paths) # Remove empty wallets dir os.rmdir(cache_path('wallets')) for entry in os.listdir(cache_path()): # Only keep indexes, chainstate and blocks folders if entry not in ['chainstate', 'blocks', 'indexes']: os.remove(cache_path(entry)) for i in range(self.num_nodes): self.log.debug( f"Copy cache directory {cache_node_dir} to node {i}") to_dir = get_datadir_path(self.options.tmpdir, i) shutil.copytree(cache_node_dir, to_dir) # Overwrite port/rpcport in bitcoin.conf initialize_datadir( self.options.tmpdir, i, self.chain, self.disable_autoconnect, ) def _initialize_chain_clean(self): """Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization.""" for i in range(self.num_nodes): initialize_datadir( self.options.tmpdir, i, self.chain, self.disable_autoconnect, ) def skip_if_no_py3_zmq(self): """Attempt to import the zmq package and skip the test if the import fails.""" try: import zmq # noqa except ImportError: raise SkipTest("python3-zmq module not available.") def skip_if_no_python_bcc(self): """Attempt to import the bcc package and skip the tests if the import fails.""" try: import bcc # type: ignore[import] # noqa: F401 except ImportError: raise SkipTest("bcc python module not available") def skip_if_no_bitcoind_tracepoints(self): """Skip the running test if bitcoind has not been compiled with USDT tracepoint support.""" if not self.is_usdt_compiled(): raise SkipTest( "bitcoind has not been built with USDT tracepoints enabled.") def skip_if_no_bpf_permissions(self): """Skip the running test if we don't have permissions to do BPF syscalls and load BPF maps.""" # check for 'root' permissions if os.geteuid() != 0: raise SkipTest( "no permissions to use BPF (please review the tests carefully before running them with higher privileges)") def skip_if_platform_not_linux(self): """Skip the running test if we are not on a Linux platform""" if platform.system() != "Linux": raise SkipTest("not on a Linux system") def skip_if_no_bitcoind_zmq(self): """Skip the running test if bitcoind has not been compiled with zmq support.""" if not self.is_zmq_compiled(): raise SkipTest("bitcoind has not been built with zmq enabled.") def skip_if_no_wallet(self): """Skip the running test if wallet has not been compiled.""" if not self.is_wallet_compiled(): raise SkipTest("wallet has not been compiled.") def skip_if_no_wallet_tool(self): """Skip the running test if bitcoin-wallet has not been compiled.""" if not self.is_wallet_tool_compiled(): raise SkipTest("bitcoin-wallet has not been compiled") def skip_if_no_cli(self): """Skip the running test if bitcoin-cli has not been compiled.""" if not self.is_cli_compiled(): raise SkipTest("bitcoin-cli has not been compiled.") def skip_if_no_chronik(self): """Skip the running test if Chronik indexer has not been compiled.""" if not self.is_chronik_compiled(): raise SkipTest("Chronik indexer has not been compiled.") def is_cli_compiled(self): """Checks whether bitcoin-cli was compiled.""" return 
self.config["components"].getboolean("ENABLE_CLI") def is_wallet_compiled(self): """Checks whether the wallet module was compiled.""" return self.config["components"].getboolean("ENABLE_WALLET") def is_wallet_tool_compiled(self): """Checks whether bitcoin-wallet was compiled.""" return self.config["components"].getboolean("ENABLE_WALLET_TOOL") def is_chronik_compiled(self): """Checks whether Chronik indexer was compiled.""" return self.config["components"].getboolean("ENABLE_CHRONIK") def is_zmq_compiled(self): """Checks whether the zmq module was compiled.""" return self.config["components"].getboolean("ENABLE_ZMQ") def is_usdt_compiled(self): """Checks whether the USDT tracepoints were compiled.""" return self.config["components"].getboolean("ENABLE_USDT_TRACEPOINTS") diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index aa4160640..7a03d5f79 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -1,1040 +1,1040 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" import collections import contextlib import decimal import errno import http.client import json import logging import os import re import shlex import subprocess import sys import tempfile import time import urllib.parse from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional from .address import ADDRESS_ECREG_UNSPENDABLE from .authproxy import JSONRPCException from .descriptors import descsum_create from .messages import XEC, CTransaction, FromHex from .p2p import P2P_SUBVERSION from .util import ( EncodeDecimal, append_config, assert_equal, delete_cookie_file, get_auth_cookie, get_rpc_proxy, p2p_port, rpc_url, wait_until_helper, ) BITCOIND_PROC_WAIT_TIMEOUT = 60 class FailedToStartError(Exception): """Raised when a node fails to start correctly.""" class ErrorMatch(Enum): FULL_TEXT = 1 FULL_REGEX = 2 PARTIAL_REGEX = 3 class TestNode: """A class for representing a bitcoind node under test. This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, datadir, *, chain, host, rpc_port, p2p_port, chronik_port, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, emulator=None, start_perf=False, use_valgrind=False, descriptors=False): """ Kwargs: start_perf (bool): If True, begin profiling the node with `perf` as soon as the node starts. 
""" self.index = i self.p2p_conn_index = 1 self.datadir = datadir self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf") self.stdout_dir = os.path.join(self.datadir, "stdout") self.stderr_dir = os.path.join(self.datadir, "stderr") self.chain = chain self.host = host self.rpc_port = rpc_port self.p2p_port = p2p_port self.chronik_port = chronik_port self.name = f"testnode-{i}" self.rpc_timeout = timewait self.binary = bitcoind if not os.path.isfile(self.binary): raise FileNotFoundError( f"Binary '{self.binary}' could not be found.\nTry setting it manually:\n" f"\tBITCOIND= {sys.argv[0]}") self.coverage_dir = coverage_dir self.cwd = cwd self.descriptors = descriptors if extra_conf is not None: append_config(datadir, extra_conf) # Most callers will just need to add extra args to the default list # below. # For those callers that need more flexibility, they can access the # default args using the provided facilities. # Note that common args are set in the config file (see # initialize_datadir) self.extra_args = extra_args # Configuration for logging is set as command-line args rather than in the bitcoin.conf file. # This means that starting a bitcoind using the temp dir to debug a failed test won't # spam debug.log. self.default_args = [ "-datadir=" + self.datadir, "-logtimemicros", "-logthreadnames", "-logsourcelocations", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-uacomment=" + self.name ] if use_valgrind: default_suppressions_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), "..", "..", "..", "contrib", "valgrind.supp") suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE", default_suppressions_file) self.binary = "valgrind" self.bitcoind_args = [bitcoind] + self.default_args self.default_args = [f"--suppressions={suppressions_file}", "--gen-suppressions=all", "--exit-on-first-error=yes", "--error-exitcode=1", "--quiet"] + self.bitcoind_args if emulator is not None: if not os.path.isfile(emulator): raise FileNotFoundError( f"Emulator '{emulator}' could not be found.") self.emulator = emulator if use_cli and not os.path.isfile(bitcoin_cli): raise FileNotFoundError( f"Binary '{bitcoin_cli}' could not be found.\nTry setting it manually:\n" f"\tBITCOINCLI= {sys.argv[0]}") self.cli = TestNodeCLI(bitcoin_cli, self.datadir, self.emulator) self.use_cli = use_cli self.start_perf = start_perf self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.relay_fee_cache = None self.log = logging.getLogger(f'TestFramework.node{i}') # Whether to kill the node when this object goes away self.cleanup_on_exit = True # Cache perf subprocesses here by their data output filename. 
self.perf_subprocesses = {} self.p2ps = [] self.timeout_factor = timeout_factor AddressKeyPair = collections.namedtuple( 'AddressKeyPair', ['address', 'key']) PRIV_KEYS = [ # address , privkey AddressKeyPair( 'mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), AddressKeyPair( 'msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), AddressKeyPair( 'mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), AddressKeyPair( 'mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), AddressKeyPair( 'msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), AddressKeyPair( 'n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), AddressKeyPair( 'myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), AddressKeyPair( 'mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), AddressKeyPair( 'mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), AddressKeyPair( 'mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), AddressKeyPair( 'mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), AddressKeyPair( 'mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'), ] def get_deterministic_priv_key(self): """Return a deterministic priv key in base58, that only depends on the node's index""" num_keys = len(self.PRIV_KEYS) assert self.index < num_keys, \ f"Only {num_keys} keys are defined, please extend TestNode.PRIV_KEYS if " \ f"more are needed." return self.PRIV_KEYS[self.index] def _node_msg(self, msg: str) -> str: """Return a modified msg that identifies this node by its index as a debugging aid.""" return f"[node {self.index}] {msg}" def _raise_assertion_error(self, msg: str): """Raise an AssertionError with msg modified to identify this node.""" raise AssertionError(self._node_msg(msg)) def __del__(self): # Ensure that we don't leave any bitcoind processes lying around after # the test ends if self.process and self.cleanup_on_exit: # Should only happen on test failure # Avoid using logger, as that may have already been shutdown when # this destructor is called. print(self._node_msg("Cleaning up leftover process")) self.process.kill() def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: return getattr( RPCOverloadWrapper(self.cli, True, self.descriptors), name) else: assert self.rpc is not None, self._node_msg( "Error: RPC not initialized") assert self.rpc_connected, self._node_msg( "Error: No RPC connection") return getattr( RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name) def clear_default_args(self): self.default_args.clear() def extend_default_args(self, args): self.default_args.extend(args) def remove_default_args(self, args): for rm_arg in args: # Remove all occurrences of rm_arg in self.default_args: # - if the arg is a flag (-flag), then the names must match # - if the arg is a value (-key=value) then the name must starts # with "-key=" (the '"' char is to avoid removing "-key_suffix" # arg is "-key" is the argument to remove). 
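A minimal sketch of the matching rule spelled out in the comment above, reusing its "-key" placeholder (the helper name strip_arg is made up for illustration and is not part of the patched file): removing "-key" drops the bare flag and any "-key=value" form, but leaves "-key_suffix=value" alone.

    def strip_arg(default_args, rm_arg):
        # Keep arguments that are neither the exact flag nor a "-key=value" form.
        return [arg for arg in default_args
                if arg != rm_arg and not arg.startswith(rm_arg + '=')]

    assert strip_arg(["-key", "-key=1", "-key_suffix=2"], "-key") == ["-key_suffix=2"]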
self.default_args = [def_arg for def_arg in self.default_args if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')] def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args # Add a new stdout and stderr file each time bitcoind is started if stderr is None: stderr = tempfile.NamedTemporaryFile( dir=self.stderr_dir, delete=False) if stdout is None: stdout = tempfile.NamedTemporaryFile( dir=self.stdout_dir, delete=False) self.stderr = stderr self.stdout = stdout if cwd is None: cwd = self.cwd # Delete any existing cookie file -- if such a file exists (eg due to # unclean shutdown), it will get overwritten anyway by bitcoind, and # potentially interfere with our attempt to authenticate delete_cookie_file(self.datadir, self.chain) # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are # written to stderr and not the terminal subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") p_args = [self.binary] + self.default_args + extra_args if self.emulator is not None: p_args = [self.emulator] + p_args self.process = subprocess.Popen( p_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up") if self.start_perf: self._start_perf() def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: raise FailedToStartError(self._node_msg( f'bitcoind exited with status {self.process.returncode} during ' f'initialization')) try: rpc = get_rpc_proxy( rpc_url( self.datadir, self.chain, self.host, self.rpc_port), self.index, # Shorter timeout to allow for one retry in case of # ETIMEDOUT timeout=self.rpc_timeout // 2, coveragedir=self.coverage_dir ) rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC # connection is up wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor) # Wait for the node to finish reindex, block import, and # loading the mempool. Usually importing happens fast or # even "immediate" when the node is started. However, there # is no guarantee and sometimes ThreadImport might finish # later. This is going to cause intermittent test failures, # because generally the tests assume the node is fully # ready after being started. # # For example, the node will reject block messages from p2p # when it is still importing with the error "Unexpected # block message received" # # The wait is done here to make tests as robust as possible # and prevent racy tests and intermittent failures as much # as possible. Some tests might not need this, but the # overhead is trivial, and the added guarantees are worth # the minimal performance cost. self.log.debug("RPC successfully started") if self.use_cli: return self.rpc = rpc self.rpc_connected = True self.url = self.rpc.url return except JSONRPCException as e: # Initialization phase # -28 RPC in warmup # -342 Service unavailable, RPC server started but is shutting down due to error if e.error['code'] != -28 and e.error['code'] != -342: raise # unknown JSON RPC exception except ConnectionResetError: # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount # succeeds. 
Try again to properly raise the FailedToStartError pass except OSError as e: if e.errno == errno.ETIMEDOUT: # Treat identical to ConnectionResetError pass elif e.errno == errno.ECONNREFUSED: # Port not yet open? pass else: # unknown OS error raise except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; # bitcoind is still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) self._raise_assertion_error( f"Unable to connect to bitcoind after {self.rpc_timeout}s") def wait_for_cookie_credentials(self): """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up.""" self.log.debug("Waiting for cookie credentials") # Poll at a rate of four times per second. poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): try: get_auth_cookie(self.datadir, self.chain) self.log.debug("Cookie credentials successfully retrieved") return except ValueError: # cookie file not found and no rpcuser or rpcpassword; # bitcoind is still starting so we continue polling until # RPC credentials are retrieved pass time.sleep(1.0 / poll_per_s) self._raise_assertion_error( f"Unable to retrieve cookie credentials after {self.rpc_timeout}s") def generate(self, nblocks, maxtries=1000000, **kwargs): self.log.debug( "TestNode.generate() dispatches `generate` call to `generatetoaddress`") return self.generatetoaddress( nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs) def generateblock(self, *args, invalid_call, **kwargs): assert not invalid_call return self.__getattr__('generateblock')(*args, **kwargs) def generatetoaddress(self, *args, invalid_call, **kwargs): assert not invalid_call return self.__getattr__('generatetoaddress')(*args, **kwargs) def generatetodescriptor(self, *args, invalid_call, **kwargs): assert not invalid_call return self.__getattr__('generatetodescriptor')(*args, **kwargs) def buildavalancheproof(self, sequence: int, expiration: int, master: str, stakes: List[Dict[str, Any]], payoutAddress: Optional[str] = ADDRESS_ECREG_UNSPENDABLE) -> str: return self.__getattr__('buildavalancheproof')( sequence=sequence, expiration=expiration, master=master, stakes=stakes, payoutAddress=payoutAddress, ) def get_wallet_rpc(self, wallet_name): if self.use_cli: return RPCOverloadWrapper( self.cli(f"-rpcwallet={wallet_name}"), True, self.descriptors) else: assert self.rpc is not None, self._node_msg( "Error: RPC not initialized") assert self.rpc_connected, self._node_msg( "Error: RPC not connected") wallet_path = f"wallet/{urllib.parse.quote(wallet_name)}" return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors) def stop_node(self, expected_stderr='', *, wait=0, wait_until_stopped=True): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop(wait=wait) except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") # If there are any running perf processes, stop them. for profile_name in tuple(self.perf_subprocesses.keys()): self._stop_perf(profile_name) # Check that stderr is as expected self.stderr.seek(0) stderr = self.stderr.read().decode('utf-8').strip() if stderr != expected_stderr: raise AssertionError( f"Unexpected stderr {stderr} != {expected_stderr}") self.stdout.close() self.stderr.close() del self.p2ps[:] if wait_until_stopped: self.wait_until_stopped() def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. 
This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert return_code == 0, self._node_msg( f"Node returned non-zero exit code ({return_code}) when stopping") self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until_helper( self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor) @property def chain_path(self) -> Path: return Path(self.datadir) / self.chain @property def debug_log_path(self) -> Path: return self.chain_path / 'debug.log' def debug_log_bytes(self) -> int: with open(self.debug_log_path, encoding='utf-8') as dl: dl.seek(0, 2) return dl.tell() @contextlib.contextmanager def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2): """Assert that some debug messages are present within some timeout. Unexpected debug messages may be optionally provided to fail a test if they appear before expected messages. Note: expected_msgs must always be non-empty even if the goal is to check for unexpected_msgs. This provides a bounded scenario such that "we expect to reach some target resulting in expected_msgs without seeing unexpected_msgs. Otherwise, we are testing that something never happens, which is fundamentally not robust test logic. """ if not expected_msgs: raise AssertionError("Expected debug messages is empty") if unexpected_msgs is None: unexpected_msgs = [] time_end = time.time() + timeout * self.timeout_factor prev_size = self.debug_log_bytes() yield while True: found = True with open(self.debug_log_path, encoding='utf-8') as dl: dl.seek(prev_size) log = dl.read() print_log = " - " + "\n - ".join(log.splitlines()) for unexpected_msg in unexpected_msgs: if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE): self._raise_assertion_error( f'Unexpected message "{unexpected_msg}" partially matches ' f'log:\n\n{print_log}\n\n') for expected_msg in expected_msgs: if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: found = False if found: return if time.time() >= time_end: break time.sleep(0.05) self._raise_assertion_error( f'Expected messages "{expected_msgs}" does not partially match ' f'log:\n\n{print_log}\n\n') @contextlib.contextmanager def wait_for_debug_log( self, expected_msgs: List[bytes], timeout=60, interval=0.05, chatty_callable=None): """ Block until we see all the debug log messages or until we exceed the timeout. If a chatty_callable is provided, it is repeated at every iteration. """ time_end = time.time() + timeout * self.timeout_factor prev_size = self.debug_log_bytes() yield while True: found = True if chatty_callable is not None: # Ignore the chatty_callable returned value, as we are only # interested in the debug log content here. 
chatty_callable() with open(self.debug_log_path, "rb") as dl: dl.seek(prev_size) log = dl.read() for expected_msg in expected_msgs: if expected_msg not in log: found = False if found: return if time.time() >= time_end: print_log = " - " + \ "\n - ".join([f"\n - {line.decode()}" for line in log.splitlines()]) break time.sleep(interval) self._raise_assertion_error( f'Expected messages "{str(expected_msgs)}" does not partially match ' f'log:\n\n{print_log}\n\n') @contextlib.contextmanager def profile_with_perf(self, profile_name: str): """ Context manager that allows easy profiling of node activity using `perf`. See `test/functional/README.md` for details on perf usage. Args: profile_name: This string will be appended to the profile data filename generated by perf. """ subp = self._start_perf(profile_name) yield if subp: self._stop_perf(profile_name) def _start_perf(self, profile_name=None): """Start a perf process to profile this node. Returns the subprocess running perf.""" subp = None def test_success(cmd): return subprocess.call( # shell=True required for pipe use below cmd, shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0 if not sys.platform.startswith('linux'): self.log.warning( "Can't profile with perf; only available on Linux platforms") return None if not test_success('which perf'): self.log.warning( "Can't profile with perf; must install perf-tools") return None if not test_success( f'readelf -S {shlex.quote(self.binary)} | grep .debug_str'): self.log.warning( "perf output won't be very useful without debug symbols compiled into bitcoind") output_path = tempfile.NamedTemporaryFile( dir=self.datadir, prefix=f"{profile_name or 'test'}.perf.data.", delete=False, ).name cmd = [ 'perf', 'record', '-g', # Record the callgraph. # Compatibility for gcc's --fomit-frame-pointer. '--call-graph', 'dwarf', '-F', '101', # Sampling frequency in Hz. '-p', str(self.process.pid), '-o', output_path, ] subp = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.perf_subprocesses[profile_name] = subp return subp def _stop_perf(self, profile_name): """Stop (and pop) a perf subprocess.""" subp = self.perf_subprocesses.pop(profile_name) output_path = subp.args[subp.args.index('-o') + 1] subp.terminate() subp.wait(timeout=10) stderr = subp.stderr.read().decode() if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr: self.log.warning( "perf couldn't collect data! Try " "'sudo sysctl -w kernel.perf_event_paranoid=-1'") else: report_cmd = f"perf report -i {output_path}" self.log.info(f"See perf output by running '{report_cmd}'") def assert_start_raises_init_error( self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): """Attempt to start the node and expect it to raise an error. extra_args: extra arguments to pass through to bitcoind expected_msg: regex that stderr should match when bitcoind fails Will throw if bitcoind starts without an error.
Will throw if an expected_msg is provided and it does not match bitcoind's stdout.""" with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \ tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout: try: self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs) ret = self.process.wait(timeout=self.rpc_timeout) self.log.debug(self._node_msg( f'bitcoind exited with status {ret} during initialization')) self.running = False self.process = None # Check stderr for expected message if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8').strip() if match == ErrorMatch.PARTIAL_REGEX: if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: self._raise_assertion_error( f'Expected message "{expected_msg}" does not partially ' f'match stderr:\n"{stderr}"') elif match == ErrorMatch.FULL_REGEX: if re.fullmatch(expected_msg, stderr) is None: self._raise_assertion_error( f'Expected message "{expected_msg}" does not fully ' f'match stderr:\n"{stderr}"') elif match == ErrorMatch.FULL_TEXT: if expected_msg != stderr: self._raise_assertion_error( f'Expected message "{expected_msg}" does not fully ' f'match stderr:\n"{stderr}"') except subprocess.TimeoutExpired: self.process.kill() self.running = False self.process = None assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s ' if expected_msg is None: assert_msg += "with an error" else: assert_msg += "with expected error " + expected_msg self._raise_assertion_error(assert_msg) def relay_fee(self, cached=True): if not self.relay_fee_cache or not cached: self.relay_fee_cache = self.getnetworkinfo()["relayfee"] return self.relay_fee_cache def calculate_fee(self, tx): """ Estimate the necessary fees (in sats) for an unsigned CTransaction assuming: - the current relayfee on node - all inputs are compressed-key p2pkh, and will be signed ecdsa or schnorr - all inputs currently unsigned (empty scriptSig) """ billable_size_estimate = tx.billable_size() # Add some padding for signatures / public keys # 107 = length of PUSH(longest_sig = 72 bytes), PUSH(pubkey = 33 bytes) billable_size_estimate += len(tx.vin) * 107 # relay_fee gives a value in XEC per kB. return int(self.relay_fee() / 1000 * billable_size_estimate * XEC) def calculate_fee_from_txid(self, txid): ctx = FromHex(CTransaction(), self.getrawtransaction(txid)) return self.calculate_fee(ctx) def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs): """Add an inbound p2p connection to the node. This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' p2p_conn.peer_connect( **kwargs, net=self.chain, timeout_factor=self.timeout_factor)() self.p2ps.append(p2p_conn) p2p_conn.wait_until( lambda: p2p_conn.is_connected, check_connected=False) if wait_for_verack: # Wait for the node to send us the version and verack p2p_conn.wait_for_verack() # At this point we have sent our version message and received the version and verack, however the full node # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully # established (fSuccessfullyConnected). # # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the # message we send. However, it might lead to races where we are expecting to receive a message. E.g. 
a # transaction that will be added to the mempool as soon as we return here. # # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds) # in comparison to the upside of making tests less fragile and # unexpected intermittent errors less likely. p2p_conn.sync_with_ping() # Consistency check that the Bitcoin ABC has received our user agent # string. This checks the node's newest peer. It could be racy if # another Bitcoin ABC node has connected since we opened our # connection, but we don't expect that to happen. assert_equal(self.getpeerinfo()[-1]['subver'], P2P_SUBVERSION) return p2p_conn def add_outbound_p2p_connection( self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs): """Add an outbound p2p connection from node. Must be an "outbound-full-relay", "block-relay-only", "addr-fetch", "feeler" or "avalanche" connection. This method adds the p2p connection to the self.p2ps list and returns the connection to the caller. """ def addconnection_callback(address, port): self.log.debug( f"Connecting to {address}:{port} {connection_type}") self.addconnection(f'{address}:{port}', connection_type) p2p_conn.peer_accept_connection( connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, **kwargs)() if connection_type == "feeler": # feeler connections are closed as soon as the node receives a # `version` message p2p_conn.wait_until( lambda: p2p_conn.message_count["version"] == 1, check_connected=False) p2p_conn.wait_until( lambda: not p2p_conn.is_connected, check_connected=False) else: p2p_conn.wait_for_connect() self.p2ps.append(p2p_conn) p2p_conn.wait_for_verack() p2p_conn.sync_with_ping() return p2p_conn def num_test_p2p_connections(self): """Return number of test framework p2p connections to the node.""" return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION]) def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor) class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) def arg_to_cli(arg): if isinstance(arg, bool): return str(arg).lower() elif arg is None: return 'null' elif isinstance(arg, dict) or isinstance(arg, list): return json.dumps(arg, default=EncodeDecimal) else: return str(arg) class TestNodeCLI: """Interface to bitcoin-cli for an individual node""" def __init__(self, binary, datadir, emulator=None): self.options = [] self.binary = binary self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.bitcoincli') self.emulator = emulator def __call__(self, *options, input=None): # TestNodeCLI is callable with bitcoin-cli command-line options cli = TestNodeCLI(self.binary, self.datadir, self.emulator) cli.options = [str(o) for o in options] cli.input = input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for request in requests: try: - results.append(dict(result=request())) + results.append({"result": request()}) except JSONRPCException as e: - results.append(dict(error=e)) + results.append({"error": e}) return results def send_cli(self, command=None, *args, 
**kwargs): """Run bitcoin-cli command. Deserializes returned string as python object.""" pos_args = [arg_to_cli(arg) for arg in args] named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()] assert not (pos_args and named_args), \ "Cannot use positional arguments and named arguments in the same " \ "bitcoin-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] if command is not None: p_args += [command] p_args += pos_args + named_args self.log.debug(f"Running bitcoin-cli {p_args[2:]}") if self.emulator is not None: p_args = [self.emulator] + p_args process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match( r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() - raise JSONRPCException(dict(code=int(code), message=message)) + raise JSONRPCException({"code": int(code), "message": message}) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError( returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except (json.JSONDecodeError, decimal.InvalidOperation): return cli_stdout.rstrip("\n") class RPCOverloadWrapper: def __init__(self, rpc, cli=False, descriptors=False): self.rpc = rpc self.is_cli = cli self.descriptors = descriptors def __getattr__(self, name): return getattr(self.rpc, name) def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None): if descriptors is None: descriptors = self.descriptors return self.__getattr__('createwallet')( wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup) def importprivkey(self, privkey, label=None, rescan=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ( 'descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('importprivkey')(privkey, label, rescan) desc = descsum_create('combo(' + privkey + ')') req = [{ 'desc': desc, 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }] import_res = self.importdescriptors(req) if not import_res[0]['success']: raise JSONRPCException(import_res[0]['error']) def addmultisigaddress(self, nrequired, keys, label=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ( 'descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('addmultisigaddress')( nrequired, keys, label) cms = self.createmultisig(nrequired, keys) req = [{ 'desc': cms['descriptor'], 'timestamp': 0, 'label': label if label else '' }] import_res = self.importdescriptors(req) if not import_res[0]['success']: raise JSONRPCException(import_res[0]['error']) return cms def importpubkey(self, pubkey, label=None, rescan=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ( 'descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('importpubkey')(pubkey, label, rescan) desc = descsum_create('combo(' + pubkey + ')') req = [{ 'desc': desc, 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }] import_res = self.importdescriptors(req) if not import_res[0]['success']: raise JSONRPCException(import_res[0]['error']) def 
importaddress(self, address, label=None, rescan=None, p2sh=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ( 'descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('importaddress')( address, label, rescan, p2sh) is_hex = False try: int(address, 16) is_hex = True desc = descsum_create('raw(' + address + ')') except BaseException: desc = descsum_create('addr(' + address + ')') reqs = [{ 'desc': desc, 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }] if is_hex and p2sh: reqs.append({ 'desc': descsum_create('p2sh(raw(' + address + '))'), 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }) import_res = self.importdescriptors(reqs) for res in import_res: if not res['success']: raise JSONRPCException(res['error']) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 38ac427ee..24ce5fb8c 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -1,936 +1,936 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`. """ import argparse import configparser import datetime import json import logging import multiprocessing import os import re import shutil import subprocess import sys import tempfile import threading import time import unittest import xml.etree.ElementTree as ET from collections import deque from queue import Empty, Queue +from typing import Set # Formatting. Default colors to empty strings. 
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) TICK = "✓ " CROSS = "✖ " CIRCLE = "○ " except UnicodeDecodeError: TICK = "P " CROSS = "x " CIRCLE = "o " if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393): # type: ignore if os.name == 'nt': import ctypes kernel32 = ctypes.windll.kernel32 # type: ignore ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 # Enable ascii color control to stdout stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE) stdout_mode = ctypes.c_int32() kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode)) kernel32.SetConsoleMode( stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) # Enable ascii color control to stderr stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE) stderr_mode = ctypes.c_int32() kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode)) kernel32.SetConsoleMode( stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) # primitive formatting on supported # terminal via ANSI escape sequences: BOLD = ('\033[0m', '\033[1m') GREEN = ('\033[0m', '\033[0;32m') RED = ('\033[0m', '\033[0;31m') GREY = ('\033[0m', '\033[1;30m') TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 TEST_FRAMEWORK_MODULES = [ "address", "blocktools", "messages", "muhash", "script", "txtools", "util", ] -NON_SCRIPTS = [ +NON_SCRIPTS = { # These are python files that live in the functional tests directory, but # are not test scripts. "combine_logs.py", "create_cache.py", "test_runner.py", -] +} EXTRA_PRIVILEGES_TESTS = [ # These tests can only run with extra privileges. # They need to be excluded from the timing file because they are not # designed to run in the same context as the other tests. "interface_usdt_net.py", "interface_usdt_utxocache.py", "interface_usdt_validation.py", ] TEST_PARAMS = { # Some test can be run with additional parameters. # When a test is listed here, then it will be run without parameter as well # as with additional parameters listed here. 
# This: # example "testName" : [["--param1", "--param2"] , ["--param3"]] # will run the test 3 times: # testName # testName --param1 --param2 # testname --param3 "rpc_bind.py": [["--ipv4"], ["--ipv6"], ["--nonloopback"]], "rpc_createmultisig.py": [["--descriptors"]], "rpc_deriveaddresses.py": [["--usecli"]], "rpc_fundrawtransaction.py": [["--descriptors"]], "rpc_rawtransaction.py": [["--descriptors"]], "rpc_signrawtransaction.py": [["--descriptors"]], # FIXME: "rpc_psbt.py": [["--descriptors"]], "wallet_address_types.py": [["--descriptors"]], "tool_wallet.py": [["--descriptors"]], "wallet_avoidreuse.py": [["--descriptors"]], "wallet_balance.py": [["--descriptors"]], # FIXME: "wallet_basic.py": [["--descriptors"]], "wallet_createwallet.py": [["--usecli"], ["--descriptors"]], "wallet_encryption.py": [["--descriptors"]], "wallet_hd.py": [["--descriptors"]], "wallet_importprunedfunds.py": [["--descriptors"]], # FIXME: "wallet_keypool.py": [["--descriptors"]], "wallet_keypool_topup.py": [["--descriptors"]], "wallet_labels.py": [["--descriptors"]], "wallet_listsinceblock.py": [["--descriptors"]], "wallet_listtransactions.py": [["--descriptors"]], "wallet_multiwallet.py": [["--usecli"]], "wallet_txn_doublespend.py": [["--mineblock"]], "wallet_txn_clone.py": [["--mineblock"]], "wallet_watchonly.py": [["--usecli"]], } # Used to limit the number of tests, when list of tests is not provided on command line # When --extended is specified, we run all tests, otherwise # we only run a test if its execution time in seconds does not exceed # EXTENDED_CUTOFF DEFAULT_EXTENDED_CUTOFF = 40 DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1 def bold(text) -> str: return f"{BOLD[1]}{text}{BOLD[0]}" class TestCase: """ Data structure to hold and run information necessary to launch a test case. """ def __init__(self, test_num, test_case, tests_dir, tmpdir, failfast_event, flags=None): self.tests_dir = tests_dir self.tmpdir = tmpdir self.test_case = test_case self.test_num = test_num self.failfast_event = failfast_event self.flags = flags def run(self): if self.failfast_event.is_set(): return TestResult(self.test_num, self.test_case, "", "Skipped", 0, "", "") portseed = self.test_num portseed_arg = [f"--portseed={portseed}"] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) test_argv = self.test_case.split() testname = re.sub('.py$', '', test_argv[0]) testdir = os.path.join(f"{self.tmpdir}", f"{testname}_{portseed}") tmpdir_arg = [f"--tmpdir={testdir}"] start_time = time.time() process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg, universal_newlines=True, stdout=log_stdout, stderr=log_stderr) process.wait() log_stdout.seek(0), log_stderr.seek(0) [stdout, stderr] = [log.read().decode('utf-8') for log in (log_stdout, log_stderr)] log_stdout.close(), log_stderr.close() if process.returncode == TEST_EXIT_PASSED and stderr == "": status = "Passed" elif process.returncode == TEST_EXIT_SKIPPED: status = "Skipped" else: status = "Failed" return TestResult(self.test_num, self.test_case, testdir, status, time.time() - start_time, stdout, stderr) def on_ci(): return os.getenv('TRAVIS') == 'true' or os.getenv( 'TEAMCITY_VERSION') is not None def main(): # Read config generated by configure. 
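For reference, a condensed standalone sketch of the TEST_PARAMS expansion described above (illustrative only, not how test_runner.py is structured; the real expansion happens later in get_tests_to_run): each listed test runs once without parameters plus once per parameter list.

    test_params = {"rpc_bind.py": [["--ipv4"], ["--ipv6"], ["--nonloopback"]]}
    expanded = []
    for name in ["rpc_bind.py", "wallet_basic.py"]:
        expanded.append(name)
        for params in test_params.get(name, []):
            expanded.append(name + " " + " ".join(params))
    assert expanded == ["rpc_bind.py", "rpc_bind.py --ipv4", "rpc_bind.py --ipv6",
                        "rpc_bind.py --nonloopback", "wallet_basic.py"]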
config = configparser.ConfigParser() configfile = os.path.join(os.path.abspath( os.path.dirname(__file__)), "..", "config.ini") config.read_file(open(configfile, encoding="utf8")) src_dir = config["environment"]["SRCDIR"] build_dir = config["environment"]["BUILDDIR"] tests_dir = os.path.join(src_dir, 'test', 'functional') # SRCDIR must be set for cdefs.py to find and parse consensus.h os.environ["SRCDIR"] = src_dir # Parse arguments and pass through unrecognised args parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__, epilog=''' Help text and arguments for individual test script:''', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to ' 'the console, combined from the test framework ' 'and all test nodes.') parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') parser.add_argument( '--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.') parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF, help='set the cutoff runtime for what tests get run') parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit') parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS, help='how many test scripts to run in parallel.') parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.') parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs') parser.add_argument('--tmpdirprefix', '-t', default=os.path.join(build_dir, 'test', 'tmp'), help="Root directory for datadirs") parser.add_argument( '--failfast', action='store_true', help='stop execution after the first test failure') parser.add_argument('--junitoutput', '-J', help="File that will store JUnit formatted test results. 
If no absolute path is given it is treated as relative to the temporary directory.") parser.add_argument('--testsuitename', '-n', default='Bitcoin ABC functional tests', help="Name of the test suite, as it will appear in the logs and in the JUnit report.") args, unknown_args = parser.parse_known_args() # args to be passed on always start with two dashes; tests are the # remaining unknown args tests = [arg for arg in unknown_args if arg[:2] != "--"] passon_args = [arg for arg in unknown_args if arg[:2] == "--"] passon_args.append(f"--configfile={configfile}") # Set up logging logging_level = logging.INFO if args.quiet else logging.DEBUG logging.basicConfig(format='%(message)s', level=logging_level) logging.info(f"Starting {args.testsuitename}") # Create base test directory tmpdir = os.path.join(f"{args.tmpdirprefix}", f"test_runner_₿₵_🏃_{datetime.datetime.now():%Y%m%d_%H%M%S}") os.makedirs(tmpdir) logging.debug(f"Temporary test directory at {tmpdir}") if args.junitoutput and not os.path.isabs(args.junitoutput): args.junitoutput = os.path.join(tmpdir, args.junitoutput) enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND") if not enable_bitcoind: print("No functional tests to run.") print("Rerun ./configure with --with-daemon and then make") sys.exit(0) # Build list of tests all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS) # Check all tests with parameters actually exist for test in TEST_PARAMS: if test not in all_scripts: print(f"ERROR: Test with parameter {test} does not exist, check it has " "not been renamed or deleted") sys.exit(1) if tests: # Individual tests have been specified. Run specified tests that exist # in the all_scripts list. Accept the name with or without .py # extension. individual_tests = [ re.sub(r"\.py$", "", test) + ".py" for test in tests if not test.endswith('*')] test_list = [] for test in individual_tests: if test in all_scripts: test_list.append(test) else: print(f"{bold('WARNING!')} Test '{test}' not found in full test list.") # Allow for wildcard at the end of the name, so a single input can # match multiple tests for test in tests: if test.endswith('*'): test_list.extend( [t for t in all_scripts if t.startswith(test[:-1])]) # do not cut off explicitly specified tests cutoff = sys.maxsize else: # Run base tests only test_list = all_scripts cutoff = sys.maxsize if args.extended else args.cutoff # Remove the test cases that the user has explicitly asked to exclude. if args.exclude: exclude_tests = [re.sub(r"\.py$", "", test) + (".py" if ".py" not in test else "") for test in args.exclude.split(',')] for exclude_test in exclude_tests: if exclude_test in test_list: test_list.remove(exclude_test) else: print(f"{bold('WARNING!')} Test '{exclude_test}' not found in current " f"test list.") # Update timings from build_dir only if separate build directory is used. # We do not want to pollute source directory. build_timings = None if (src_dir != build_dir): build_timings = Timings(os.path.join(build_dir, 'timing.json')) # Always use timings from src_dir if present src_timings = Timings(os.path.join( src_dir, "test", "functional", 'timing.json')) # Add test parameters and remove long running tests if needed test_list = get_tests_to_run( test_list, TEST_PARAMS, cutoff, src_timings) if not test_list: print("No valid test scripts specified. 
Check that your test is in one " "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") sys.exit(0) if args.help: # Print help for test_runner.py, then print help of the first script # and exit. parser.print_help() subprocess.check_call( [sys.executable, os.path.join(tests_dir, test_list[0]), '-h']) sys.exit(0) check_script_prefixes(all_scripts) if not args.keepcache: shutil.rmtree(os.path.join(build_dir, "test", "cache"), ignore_errors=True) run_tests( test_list, build_dir, tests_dir, args.junitoutput, tmpdir, num_jobs=args.jobs, test_suite_name=args.testsuitename, enable_coverage=args.coverage, args=passon_args, combined_logs_len=args.combinedlogslen, build_timings=build_timings, failfast=args.failfast ) def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name, enable_coverage=False, args=None, combined_logs_len=0, build_timings=None, failfast=False): args = args or [] # Warn if bitcoind is already running try: # pgrep exits with code zero when one or more matching processes found if subprocess.run(["pgrep", "-x", "bitcoind"], stdout=subprocess.DEVNULL).returncode == 0: print( f"{bold('WARNING!')} There is already a bitcoind process running on " f"this system. Tests may fail unexpectedly due to resource contention!") except OSError: # pgrep not supported pass # Warn if there is a cache directory cache_dir = os.path.join(build_dir, "test", "cache") if os.path.isdir(cache_dir): print(f"{bold('WARNING!')} There is a cache directory here: {cache_dir}. " "If tests fail unexpectedly, try deleting the cache directory.") # Test Framework Tests print("Running Unit Tests for Test Framework Modules") test_framework_tests = unittest.TestSuite() for module in TEST_FRAMEWORK_MODULES: test_framework_tests.addTest( unittest.TestLoader().loadTestsFromName(f"test_framework.{module}")) result = unittest.TextTestRunner( verbosity=1, failfast=True).run(test_framework_tests) if not result.wasSuccessful(): logging.debug( "Early exiting after failure in TestFramework unit tests") sys.exit(False) flags = [f'--cachedir={cache_dir}'] + args if enable_coverage: coverage = RPCCoverage() flags.append(coverage.flag) logging.debug( f"Initializing coverage directory at {coverage.dir}") else: coverage = None if len(test_list) > 1 and num_jobs > 1: # Populate cache try: subprocess.check_output( [sys.executable, os.path.join(tests_dir, 'create_cache.py')] + flags + [os.path.join(f"--tmpdir={tmpdir}", "cache")]) except subprocess.CalledProcessError as e: sys.stdout.buffer.write(e.output) raise # Run Tests start_time = time.time() test_results = execute_test_processes( num_jobs, test_list, tests_dir, tmpdir, flags, failfast) runtime = time.time() - start_time max_len_name = len(max(test_list, key=len)) print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len) if junitoutput is not None: save_results_as_junit( test_results, junitoutput, runtime, test_suite_name) if (build_timings is not None): build_timings.save_timings(test_results) if coverage: coverage_passed = coverage.report_rpc_coverage() logging.debug("Cleaning up coverage data") coverage.cleanup() else: coverage_passed = True # Clear up the temp directory if all subdirectories are gone if not os.listdir(tmpdir): os.rmdir(tmpdir) - all_passed = all(map( - lambda test_result: test_result.was_successful, test_results)) and coverage_passed + all_passed = all(res.was_successful for res in test_results) and coverage_passed sys.exit(not all_passed) def 
execute_test_processes( num_jobs, test_list, tests_dir, tmpdir, flags, failfast=False): update_queue = Queue() job_queue = Queue() failfast_event = threading.Event() test_results = [] poll_timeout = 10 # seconds ## # Define some helper functions we will need for threading. ## def handle_message(message, running_jobs): """ handle_message handles a single message from handle_test_cases """ if isinstance(message, TestCase): running_jobs.append((message.test_num, message.test_case)) print(f"{bold(message.test_case)} started") return if isinstance(message, TestResult): test_result = message running_jobs.remove((test_result.num, test_result.name)) test_results.append(test_result) if test_result.status == "Passed": print(f"{bold(test_result.name)} passed, " f"Duration: {TimeResolution.seconds(test_result.time)} s") elif test_result.status == "Skipped": print(f"{bold(test_result.name)} skipped") else: print(f"{bold(test_result.name)} failed, " f"Duration: {TimeResolution.seconds(test_result.time)} s\n") print(bold('stdout:')) print(test_result.stdout) print(bold('stderr:')) print(test_result.stderr) if failfast: logging.debug("Early exiting after test failure") failfast_event.set() return assert False, "we should not be here" def handle_update_messages(): """ handle_update_messages waits for messages to be sent from handle_test_cases via the update_queue. It serializes the results so we can print nice status update messages. """ printed_status = False running_jobs = [] while True: message = None try: message = update_queue.get(True, poll_timeout) if message is None: break # We printed a status message, need to kick to the next line # before printing more. if printed_status: print() printed_status = False handle_message(message, running_jobs) update_queue.task_done() except Empty: if not on_ci(): jobs = ", ".join([j[1] for j in running_jobs]) print(f"Running jobs: {jobs}", end="\r") sys.stdout.flush() printed_status = True def handle_test_cases(): """ job_runner represents a single thread that is part of a worker pool. It waits for a test, then executes that test. It also reports start and result messages to handle_update_messages """ while True: test = job_queue.get() if test is None: break # Signal that the test is starting to inform the poor waiting # programmer update_queue.put(test) result = test.run() update_queue.put(result) job_queue.task_done() ## # Setup our threads, and start sending tasks ## # Start our result collection thread. resultCollector = threading.Thread(target=handle_update_messages) resultCollector.daemon = True resultCollector.start() # Start some worker threads for _ in range(num_jobs): t = threading.Thread(target=handle_test_cases) t.daemon = True t.start() # Push all our test cases into the job queue. 
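The scheduler above is a standard producer/consumer arrangement built from queue.Queue and worker threads; here is a condensed standalone sketch of the same shape (illustrative only, with made-up job payloads, not the actual test_runner.py code):

    import queue
    import threading

    job_queue = queue.Queue()
    results = []

    def worker():
        while True:
            job = job_queue.get()
            if job is None:
                break  # sentinel value tells the worker to shut down
            results.append(f"ran {job}")
            job_queue.task_done()

    threads = [threading.Thread(target=worker, daemon=True) for _ in range(4)]
    for t in threads:
        t.start()
    for job in ["job-a", "job-b", "job-c"]:
        job_queue.put(job)
    job_queue.join()  # wait until every queued job has been processed
    for _ in threads:
        job_queue.put(None)  # release each blocked worker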
for i, t in enumerate(test_list): job_queue.put(TestCase(i, t, tests_dir, tmpdir, failfast_event, flags)) # Wait for all the jobs to be completed job_queue.join() # Wait for all the results to be compiled update_queue.join() # Flush our queues so the threads exit update_queue.put(None) for _ in range(num_jobs): job_queue.put(None) return test_results def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len): results = bold(f"\n{'TEST':<{max_len_name}} | {'STATUS':<9} | DURATION\n\n") test_results.sort(key=TestResult.sort_key) all_passed = True time_sum = 0 for test_result in test_results: all_passed = all_passed and test_result.was_successful time_sum += test_result.time test_result.padding = max_len_name results += str(test_result) testdir = test_result.testdir if combined_logs_len and os.path.isdir(testdir): # Print the final `combinedlogslen` lines of the combined logs print( bold(f'Combine the logs and print the last {combined_logs_len} lines ...')) print('\n============') print(bold(f'Combined log for {testdir}:')) print('============\n') combined_logs_args = [ sys.executable, os.path.join( tests_dir, 'combine_logs.py'), testdir] if BOLD[0]: combined_logs_args += ['--color'] combined_logs, _ = subprocess.Popen( combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate() print( "\n".join( deque( combined_logs.splitlines(), combined_logs_len))) status = TICK + "Passed" if all_passed else CROSS + "Failed" if not all_passed: results += RED[1] results += bold( f"\n{'ALL':<{max_len_name}} | {status:<9} | " f"{TimeResolution.seconds(time_sum)} s (accumulated) \n") if not all_passed: results += RED[0] results += f"Runtime: {TimeResolution.seconds(runtime)} s\n" print(results) class TestResult: """ Simple data structure to store test result values and print them properly """ def __init__(self, num, name, testdir, status, time, stdout, stderr): self.num = num self.name = name self.testdir = testdir self.status = status self.time = time self.padding = 0 self.stdout = stdout self.stderr = stderr def sort_key(self): if self.status == "Passed": return 0, self.name.lower() elif self.status == "Failed": return 2, self.name.lower() elif self.status == "Skipped": return 1, self.name.lower() def __repr__(self): if self.status == "Passed": color = GREEN glyph = TICK elif self.status == "Failed": color = RED glyph = CROSS elif self.status == "Skipped": color = GREY glyph = CIRCLE return ( f"{color[1]}{self.name:<{self.padding}} | {glyph}{self.status:<7} | " f"{TimeResolution.seconds(self.time)} s\n{color[0]}" ) @property def was_successful(self): return self.status != "Failed" -def get_all_scripts_from_disk(test_dir, non_scripts): +def get_all_scripts_from_disk(test_dir, non_scripts: Set[str]) -> Set[str]: """ Return all available test script from script directory (excluding NON_SCRIPTS) """ - python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"]) - return list(python_files - set(non_scripts)) + python_files = {t for t in os.listdir(test_dir) if t[-3:] == ".py"} + return python_files - non_scripts def check_script_prefixes(all_scripts): """Check that no more than `EXPECTED_VIOLATION_COUNT` of the test scripts don't start with one of the allowed name prefixes.""" EXPECTED_VIOLATION_COUNT = 14 # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming # convention don't immediately cause the tests to fail. 
LEEWAY = 0 good_prefixes_re = re.compile( "(abc_)?(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool|chronik)_") bad_script_names = [ script for script in all_scripts if good_prefixes_re.match(script) is None] if len(bad_script_names) < EXPECTED_VIOLATION_COUNT: print(f"{bold('HURRAY!')} Number of functional tests violating naming " "convention reduced!") print("Consider reducing EXPECTED_VIOLATION_COUNT from " f"{EXPECTED_VIOLATION_COUNT} to {len(bad_script_names)}") elif len(bad_script_names) > EXPECTED_VIOLATION_COUNT: print(f"INFO: {len(bad_script_names)} tests not meeting naming conventions " f"(expected {EXPECTED_VIOLATION_COUNT}):") formatted_bad_script_names = '\n '.join(sorted(bad_script_names)) print(f" {formatted_bad_script_names}") assert \ len(bad_script_names) <= EXPECTED_VIOLATION_COUNT + LEEWAY, \ f"Too many tests not following naming convention! ({len(bad_script_names)}" \ f" found, expected: <= {EXPECTED_VIOLATION_COUNT})" def get_tests_to_run(test_list, test_params, cutoff, src_timings): """ Returns only test that will not run longer that cutoff. Long running tests are returned first to favor running tests in parallel Timings from build directory override those from src directory """ def get_test_time(test): # Return 0 if test is unknown to always run it return next( (x['time'] for x in src_timings.existing_timings if x['name'] == test), 0) # Some tests must also be run with additional parameters. Add them to the # list. tests_with_params = [] for test_name in test_list: # always execute a test without parameters tests_with_params.append(test_name) params = test_params.get(test_name) if params is not None: tests_with_params.extend( [test_name + " " + " ".join(parameter) for parameter in params]) result = [ test for test in tests_with_params if get_test_time(test) <= cutoff] result.sort(key=lambda x: (-get_test_time(x), x)) return result class RPCCoverage: """ Coverage reporting utilities for test_runner. Coverage calculation works by having each test script subprocess write coverage files into a particular directory. These files contain the RPC commands invoked during testing, as well as a complete listing of RPC commands per `bitcoin-cli help` (`rpc_interface.txt`). After all tests complete, the commands run are combined and diff'd against the complete list to calculate uncovered RPC commands. See also: test/functional/test_framework/coverage.py """ def __init__(self): self.dir = tempfile.mkdtemp(prefix="coverage") self.flag = f"--coveragedir={self.dir}" def report_rpc_coverage(self): """ Print out RPC commands that were unexercised by tests. """ uncovered = self._get_uncovered_rpc_commands() if uncovered: print("Uncovered RPC commands:") print("".join(f" - {i}\n" for i in sorted(uncovered))) return False else: print("All RPC commands covered.") return True def cleanup(self): return shutil.rmtree(self.dir) def _get_uncovered_rpc_commands(self): """ Return a set of currently untested RPC commands. """ # This is shared from `test/functional/test_framework/coverage.py` reference_filename = 'rpc_interface.txt' coverage_file_prefix = 'coverage.' coverage_ref_filename = os.path.join(self.dir, reference_filename) coverage_filenames = set() all_cmds = set() # Consider RPC generate covered, because it is overloaded in # test_framework/test_node.py and not seen by the coverage check. 
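# As the RPCCoverage docstring above says, the report is a set difference:
# everything `bitcoin-cli help` lists minus everything any test actually
# invoked. A small sketch of that calculation with in-memory data (file
# parsing is omitted and the command names below are illustrative):
all_cmds = {"getblockchaininfo", "getbalance", "sendtoaddress", "generate", "stop"}

# One set of invoked commands per coverage.* file written during a test run.
per_test_coverage = [
    {"getblockchaininfo", "getbalance"},
    {"sendtoaddress"},
]

covered = {"generate"}             # treated as covered up front, as noted above
for cmds in per_test_coverage:
    covered.update(cmds)

uncovered = all_cmds - covered     # -> {"stop"}: still needs a functional test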
covered_cmds = set({'generate'}) if not os.path.isfile(coverage_ref_filename): raise RuntimeError("No coverage reference found") with open(coverage_ref_filename, 'r', encoding="utf8") as file: all_cmds.update([line.strip() for line in file.readlines()]) for root, _, files in os.walk(self.dir): for filename in files: if filename.startswith(coverage_file_prefix): coverage_filenames.add(os.path.join(root, filename)) for filename in coverage_filenames: with open(filename, 'r', encoding="utf8") as file: covered_cmds.update([line.strip() for line in file.readlines()]) return all_cmds - covered_cmds def save_results_as_junit(test_results, file_name, time, test_suite_name): """ Save tests results to file in JUnit format See http://llg.cubic.org/docs/junit/ for specification of format """ e_test_suite = ET.Element("testsuite", {"name": f"{test_suite_name}", "tests": str(len(test_results)), # "errors": "failures": str(len([t for t in test_results if t.status == "Failed"])), "id": "0", "skipped": str(len([t for t in test_results if t.status == "Skipped"])), "time": str(TimeResolution.milliseconds(time)), "timestamp": datetime.datetime.now().isoformat('T') }) for test_result in test_results: e_test_case = ET.SubElement(e_test_suite, "testcase", {"name": test_result.name, "classname": test_result.name, "time": str(TimeResolution.milliseconds(test_result.time)) } ) if test_result.status == "Skipped": ET.SubElement(e_test_case, "skipped") elif test_result.status == "Failed": ET.SubElement(e_test_case, "failure") # no special element for passed tests ET.SubElement(e_test_case, "system-out").text = test_result.stdout ET.SubElement(e_test_case, "system-err").text = test_result.stderr ET.ElementTree(e_test_suite).write( file_name, "UTF-8", xml_declaration=True) class Timings: """ Takes care of loading, merging and saving tests execution times. """ def __init__(self, timing_file): self.timing_file = timing_file self.existing_timings = self.load_timings() def load_timings(self): if os.path.isfile(self.timing_file): with open(self.timing_file, encoding="utf8") as file: return json.load(file) else: return [] def get_merged_timings(self, new_timings): """ Return new list containing existing timings updated with new timings Tests that do not exists are not removed """ key = 'name' merged = {} for item in self.existing_timings + new_timings: if item[key] in merged: merged[item[key]].update(item) else: merged[item[key]] = item # Sort the result to preserve test ordering in file merged = list(merged.values()) merged.sort(key=lambda t, key=key: t[key]) return merged def save_timings(self, test_results): # we only save test that have passed - timings for failed test might be # wrong (timeouts or early fails), and we exclude the tests that require # extra privileges. 
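# The rewrites this patch applies (set([...]) to a set literal,
# list(map(lambda ...)) to a comprehension as in new_timings just below,
# sorted(map(...)) to a generator expression) are behaviour-preserving;
# flake8-comprehensions only flags the more verbose spellings. A quick
# standalone check of the equivalences (sample data is made up):
results = [{"name": "b", "time": 2.4}, {"name": "a", "time": 1.6}]

# list(map(lambda ...)) and a list comprehension build the same list.
assert list(map(lambda r: {"name": r["name"], "time": round(r["time"])}, results)) \
    == [{"name": r["name"], "time": round(r["time"])} for r in results]

# set() over a throwaway list and a set comprehension give the same set.
assert set([r["name"] for r in results]) == {r["name"] for r in results}

# sorted(map(lambda ...)) and sorted() over a generator expression agree too.
assert sorted(map(lambda r: r["name"], results)) == sorted(r["name"] for r in results)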
passed_results = [ test for test in test_results if test.status == 'Passed' and test.name not in EXTRA_PRIVILEGES_TESTS] - new_timings = list(map(lambda test: {'name': test.name, 'time': TimeResolution.seconds(test.time)}, - passed_results)) + new_timings = [{'name': test.name, 'time': TimeResolution.seconds(test.time)} + for test in passed_results] merged_timings = self.get_merged_timings(new_timings) with open(self.timing_file, 'w', encoding="utf8") as file: json.dump(merged_timings, file, indent=True) class TimeResolution: @staticmethod def seconds(time_fractional_second): return round(time_fractional_second) @staticmethod def milliseconds(time_fractional_second): return round(time_fractional_second, 3) if __name__ == '__main__': main() diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index 97c3e76b2..b927b121c 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -1,683 +1,683 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the wallet.""" from decimal import Decimal from test_framework.messages import CTransaction, FromHex from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_array_result, assert_equal, assert_fee_amount, assert_raises_rpc_error, count_bytes, ) from test_framework.wallet_util import test_address FAR_IN_THE_FUTURE = 2000000000 class WalletTest(BitcoinTestFramework): WELLINGTON_FAR_FUTURE = f"-wellingtonactivationtime={int(9e9)}" def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True self.extra_args = [ [ "-acceptnonstdtxn=1", "-whitelist=noban@127.0.0.1", # This test tests mempool ancestor chain limits, which are no # longer enforced after wellington, so we need to force # wellington to activate in the distant future f"-wellingtonactivationtime={FAR_IN_THE_FUTURE}", ], ] * self.num_nodes self.supports_cli = False def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.setup_nodes() # Only need nodes 0-2 running at start of test self.stop_node(3) self.connect_nodes(0, 1) self.connect_nodes(1, 2) self.connect_nodes(0, 2) self.sync_all(self.nodes[0:3]) def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size): """Return curr_balance after asserting the fee was in range""" fee = balance_with_fee - curr_balance assert_fee_amount(fee, tx_size, fee_per_byte * 1000) return curr_balance def run_test(self): # Check that there's no UTXO on none of the nodes assert_equal(len(self.nodes[0].listunspent()), 0) assert_equal(len(self.nodes[1].listunspent()), 0) assert_equal(len(self.nodes[2].listunspent()), 0) self.log.info("Mining blocks...") self.generate(self.nodes[0], 1, sync_fun=self.no_op) walletinfo = self.nodes[0].getwalletinfo() assert_equal(walletinfo['immature_balance'], 50000000) assert_equal(walletinfo['balance'], 0) self.sync_all(self.nodes[0:3]) self.generate(self.nodes[1], 101, sync_fun=lambda: self.sync_all(self.nodes[0:3])) assert_equal(self.nodes[0].getbalance(), 50000000) assert_equal(self.nodes[1].getbalance(), 50000000) assert_equal(self.nodes[2].getbalance(), 0) # Check that only first and second nodes have UTXOs utxos = self.nodes[0].listunspent() assert_equal(len(utxos), 1) assert_equal(len(self.nodes[1].listunspent()), 1) assert_equal(len(self.nodes[2].listunspent()), 0) self.log.info("test 
gettxout") confirmed_txid, confirmed_index = utxos[0]["txid"], utxos[0]["vout"] # First, outputs that are unspent both in the chain and in the # mempool should appear with or without include_mempool txout = self.nodes[0].gettxout( txid=confirmed_txid, n=confirmed_index, include_mempool=False) assert_equal(txout['value'], 50000000) txout = self.nodes[0].gettxout( txid=confirmed_txid, n=confirmed_index, include_mempool=True) assert_equal(txout['value'], 50000000) # Send 21,000,000 XEC from 0 to 2 using sendtoaddress call. self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11000000) mempool_txid = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), 10000000) self.log.info("test gettxout (second part)") # utxo spent in mempool should be visible if you exclude mempool # but invisible if you include mempool txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, False) assert_equal(txout['value'], 50000000) txout = self.nodes[0].gettxout(confirmed_txid, confirmed_index, True) assert txout is None # new utxo from mempool should be invisible if you exclude mempool # but visible if you include mempool txout = self.nodes[0].gettxout(mempool_txid, 0, False) assert txout is None txout1 = self.nodes[0].gettxout(mempool_txid, 0, True) txout2 = self.nodes[0].gettxout(mempool_txid, 1, True) # note the mempool tx will have randomly assigned indices # but 10 will go to node2 and the rest will go to node0 balance = self.nodes[0].getbalance() - assert_equal(set([txout1['value'], txout2['value']]), - set([10000000, balance])) + assert_equal({txout1['value'], txout2['value']}, + {10000000, balance}) walletinfo = self.nodes[0].getwalletinfo() assert_equal(walletinfo['immature_balance'], 0) # Have node0 mine a block, thus it will collect its own fee. 
self.generate( self.nodes[0], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) # Exercise locking of unspent outputs unspent_0 = self.nodes[2].listunspent()[0] unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]} assert_raises_rpc_error(-8, "Invalid parameter, expected locked output", self.nodes[2].lockunspent, True, [unspent_0]) self.nodes[2].lockunspent(False, [unspent_0]) assert_raises_rpc_error(-8, "Invalid parameter, output already locked", self.nodes[2].lockunspent, False, [unspent_0]) assert_raises_rpc_error(-6, "Insufficient funds", self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20000000) assert_equal([unspent_0], self.nodes[2].listlockunspent()) self.nodes[2].lockunspent(True, [unspent_0]) assert_equal(len(self.nodes[2].listlockunspent()), 0) assert_raises_rpc_error(-8, "txid must be of length 64 (not 34, for '0000000000000000000000000000000000')", self.nodes[2].lockunspent, False, [{"txid": "0000000000000000000000000000000000", "vout": 0}]) assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[2].lockunspent, False, [{"txid": "ZZZ0000000000000000000000000000000000000000000000000000000000000", "vout": 0}]) assert_raises_rpc_error(-8, "Invalid parameter, unknown transaction", self.nodes[2].lockunspent, False, [{"txid": "0000000000000000000000000000000000000000000000000000000000000000", "vout": 0}]) assert_raises_rpc_error(-8, "Invalid parameter, vout index out of bounds", self.nodes[2].lockunspent, False, [{"txid": unspent_0["txid"], "vout": 999}]) # The lock on a manually selected output is ignored unspent_0 = self.nodes[1].listunspent()[0] self.nodes[1].lockunspent(False, [unspent_0]) tx = self.nodes[1].createrawtransaction( [unspent_0], {self.nodes[1].getnewaddress(): 1000000}) tx = self.nodes[1].fundrawtransaction(tx)['hex'] self.nodes[1].fundrawtransaction(tx, {"lockUnspents": True}) # fundrawtransaction can lock an input self.nodes[1].lockunspent(True, [unspent_0]) assert_equal(len(self.nodes[1].listlockunspent()), 0) tx = self.nodes[1].fundrawtransaction( tx, {"lockUnspents": True})['hex'] assert_equal(len(self.nodes[1].listlockunspent()), 1) # Send transaction tx = self.nodes[1].signrawtransactionwithwallet(tx)["hex"] self.nodes[1].sendrawtransaction(tx) assert_equal(len(self.nodes[1].listlockunspent()), 0) # Have node1 generate 100 blocks (so node0 can recover the fee) self.generate(self.nodes[1], 100, sync_fun=lambda: self.sync_all(self.nodes[0:3])) # node0 should end up with 100 btc in block rewards plus fees, but # minus the 21 plus fees sent to node2 assert_equal(self.nodes[0].getbalance(), 100000000 - 21000000) assert_equal(self.nodes[2].getbalance(), 21000000) # Node0 should have two unspent outputs. 
# Create a couple of transactions to send them to node2, submit them through # node1, and make sure both node0 and node2 pick them up properly: node0utxos = self.nodes[0].listunspent(1) assert_equal(len(node0utxos), 2) # create both transactions txns_to_send = [] for utxo in node0utxos: inputs = [] outputs = {} inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]}) outputs[self.nodes[2].getnewaddress()] = utxo["amount"] - 3000000 raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) txns_to_send.append( self.nodes[0].signrawtransactionwithwallet(raw_tx)) # Have node 1 (miner) send the transactions self.nodes[1].sendrawtransaction( hexstring=txns_to_send[0]["hex"], maxfeerate=0) self.nodes[1].sendrawtransaction( hexstring=txns_to_send[1]["hex"], maxfeerate=0) # Have node1 mine a block to confirm transactions: self.generate( self.nodes[1], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) assert_equal(self.nodes[0].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 94000000) # Verify that a spent output cannot be locked anymore spent_0 = {"txid": node0utxos[0]["txid"], "vout": node0utxos[0]["vout"]} assert_raises_rpc_error(-8, "Invalid parameter, expected unspent output", self.nodes[0].lockunspent, False, [spent_0]) # Send 10,000,000 XEC normal old_balance = self.nodes[2].getbalance() address = self.nodes[0].getnewaddress("test") fee_per_byte = Decimal('1000') / 1000 self.nodes[2].settxfee(fee_per_byte * 1000) txid = self.nodes[2].sendtoaddress(address, 10000000, "", "", False) self.generate( self.nodes[2], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) ctx = FromHex(CTransaction(), self.nodes[2].gettransaction(txid)['hex']) node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), old_balance - Decimal('10000000'), fee_per_byte, ctx.billable_size()) assert_equal(self.nodes[0].getbalance(), Decimal('10000000')) # Send 10,000,000 XEC with subtract fee from amount txid = self.nodes[2].sendtoaddress(address, 10000000, "", "", True) self.generate( self.nodes[2], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) node_2_bal -= Decimal('10000000') assert_equal(self.nodes[2].getbalance(), node_2_bal) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal( '20000000'), fee_per_byte, count_bytes(self.nodes[2].gettransaction(txid)['hex'])) self.log.info("Test sendmany") # Sendmany 10,000,000 XEC txid = self.nodes[2].sendmany('', {address: 10000000}, 0, "", []) self.generate( self.nodes[2], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) node_0_bal += Decimal('10000000') ctx = FromHex(CTransaction(), self.nodes[2].gettransaction(txid)['hex']) node_2_bal = self.check_fee_amount(self.nodes[2].getbalance( ), node_2_bal - Decimal('10000000'), fee_per_byte, ctx.billable_size()) assert_equal(self.nodes[0].getbalance(), node_0_bal) # Sendmany 10,000,000 XEC with subtract fee from amount txid = self.nodes[2].sendmany( '', {address: 10000000}, 0, "", [address]) self.generate( self.nodes[2], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) node_2_bal -= Decimal('10000000') assert_equal(self.nodes[2].getbalance(), node_2_bal) ctx = FromHex(CTransaction(), self.nodes[2].gettransaction(txid)['hex']) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance( ), node_0_bal + Decimal('10000000'), fee_per_byte, ctx.billable_size()) self.start_node(3, self.extra_args[3]) self.connect_nodes(0, 3) self.sync_all() # check if we can list zero value tx as available coins # 1. create raw_tx # 2. hex-changed one output to 0.0 # 3. sign and send # 4. 
check if recipient (node0) can list the zero value tx usp = self.nodes[1].listunspent( query_options={'minimumAmount': '49998000'})[0] inputs = [{"txid": usp['txid'], "vout": usp['vout']}] outputs = {self.nodes[1].getnewaddress(): 49998000, self.nodes[0].getnewaddress(): 11110000} rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace( "c0833842", "00000000") # replace 11.11 with 0.0 (int32) signed_raw_tx = self.nodes[1].signrawtransactionwithwallet(rawTx) decoded_raw_tx = self.nodes[1].decoderawtransaction( signed_raw_tx['hex']) zero_value_txid = decoded_raw_tx['txid'] self.nodes[1].sendrawtransaction(signed_raw_tx['hex']) self.sync_all() self.generate(self.nodes[1], 1) # mine a block # zero value tx must be in listunspents output unspent_txs = self.nodes[0].listunspent() found = False for uTx in unspent_txs: if uTx['txid'] == zero_value_txid: found = True assert_equal(uTx['amount'], Decimal('0')) assert found # do some -walletbroadcast tests self.stop_nodes() self.start_node(0, self.extra_args[0] + ["-walletbroadcast=0"]) self.start_node(1, self.extra_args[1] + ["-walletbroadcast=0"]) self.start_node(2, self.extra_args[2] + ["-walletbroadcast=0"]) self.connect_nodes(0, 1) self.connect_nodes(1, 2) self.connect_nodes(0, 2) self.sync_all(self.nodes[0:3]) txid_not_broadcast = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), 2000000) tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast) # mine a block, tx should not be in there self.generate( self.nodes[1], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) # should not be changed because tx was not broadcasted assert_equal(self.nodes[2].getbalance(), node_2_bal) # now broadcast from another node, mine a block, sync, and check the # balance self.nodes[1].sendrawtransaction(tx_obj_not_broadcast['hex']) self.generate( self.nodes[1], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) node_2_bal += 2000000 tx_obj_not_broadcast = self.nodes[0].gettransaction(txid_not_broadcast) assert_equal(self.nodes[2].getbalance(), node_2_bal) # create another tx txid_not_broadcast = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), 2000000) # restart the nodes with -walletbroadcast=1 self.stop_nodes() self.start_node(0, self.extra_args[0]) self.start_node(1, self.extra_args[1]) self.start_node(2, self.extra_args[2]) self.connect_nodes(0, 1) self.connect_nodes(1, 2) self.connect_nodes(0, 2) self.sync_blocks(self.nodes[0:3]) self.generate( self.nodes[0], 1, sync_fun=lambda: self.sync_blocks(self.nodes[0:3])) node_2_bal += 2000000 # tx should be added to balance because after restarting the nodes tx # should be broadcasted assert_equal(self.nodes[2].getbalance(), node_2_bal) # send a tx with value in a string (PR#6380 +) txid = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), "2000000") tx_obj = self.nodes[0].gettransaction(txid) assert_equal(tx_obj['amount'], Decimal('-2000000')) txid = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), "10000") tx_obj = self.nodes[0].gettransaction(txid) assert_equal(tx_obj['amount'], Decimal('-10000')) # check if JSON parser can handle scientific notation in strings txid = self.nodes[0].sendtoaddress( self.nodes[2].getnewaddress(), "1e3") tx_obj = self.nodes[0].gettransaction(txid) assert_equal(tx_obj['amount'], Decimal('-1000')) # General checks for errors from incorrect inputs # This will raise an exception because the amount is negative assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "-1") 
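# Every assert_raises_rpc_error(code, msg, fn, *args) call in this block follows
# one contract: invoke the RPC and require a JSON-RPC error whose code matches
# and whose message contains msg. A simplified sketch of that contract; the real
# helper is test_framework.util.assert_raises_rpc_error, and expect_rpc_error
# below is only an illustration, not its implementation:
from test_framework.authproxy import JSONRPCException


def expect_rpc_error(code, msg_substring, fn, *args, **kwargs):
    """Call fn and require a JSON-RPC failure with the given code and message text."""
    try:
        fn(*args, **kwargs)
    except JSONRPCException as exc:
        assert exc.error["code"] == code, exc.error
        assert msg_substring in exc.error["message"], exc.error
    else:
        raise AssertionError("expected the RPC call to fail, but it succeeded")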
# This will raise an exception because the amount type is wrong assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].sendtoaddress, self.nodes[2].getnewaddress(), "1f-4") # This will raise an exception since generate does not accept a string assert_raises_rpc_error(-1, "not an integer", self.generate, self.nodes[0], "2") # This will raise an exception for the invalid private key format assert_raises_rpc_error(-5, "Invalid private key encoding", self.nodes[0].importprivkey, "invalid") # This will raise an exception for importing an address with the PS2H # flag temp_address = self.nodes[1].getnewaddress() assert_raises_rpc_error(-5, "Cannot use the p2sh flag with an address - use a script instead", self.nodes[0].importaddress, temp_address, "label", False, True) # This will raise an exception for attempting to dump the private key # of an address you do not own assert_raises_rpc_error(-4, "Private key for address", self.nodes[0].dumpprivkey, temp_address) # This will raise an exception for attempting to get the private key of # an invalid Bitcoin address assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].dumpprivkey, "invalid") # This will raise an exception for attempting to set a label for an # invalid Bitcoin address assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].setlabel, "invalid address", "label") # This will raise an exception for importing an invalid address assert_raises_rpc_error(-5, "Invalid Bitcoin address or script", self.nodes[0].importaddress, "invalid") # This will raise an exception for attempting to import a pubkey that # isn't in hex assert_raises_rpc_error(-5, "Pubkey must be a hex string", self.nodes[0].importpubkey, "not hex") # This will raise an exception for importing an invalid pubkey assert_raises_rpc_error(-5, "Pubkey is not a valid public key", self.nodes[0].importpubkey, "5361746f736869204e616b616d6f746f") # Import address and private key to check correct behavior of spendable unspents # 1. Send some coins to generate new UTXO address_to_import = self.nodes[2].getnewaddress() txid = self.nodes[0].sendtoaddress(address_to_import, 1000000) self.generate( self.nodes[0], 1, sync_fun=lambda: self.sync_all(self.nodes[0:3])) # 2. Import address from node2 to node1 self.nodes[1].importaddress(address_to_import) # 3. Validate that the imported address is watch-only on node1 assert self.nodes[1].getaddressinfo(address_to_import)["iswatchonly"] # 4. Check that the unspents after import are not spendable assert_array_result(self.nodes[1].listunspent(), {"address": address_to_import}, {"spendable": False}) # 5. Import private key of the previously imported address on node1 priv_key = self.nodes[2].dumpprivkey(address_to_import) self.nodes[1].importprivkey(priv_key) # 6. 
Check that the unspents are now spendable on node1 assert_array_result(self.nodes[1].listunspent(), {"address": address_to_import}, {"spendable": True}) # Mine a block from node0 to an address from node1 coinbase_addr = self.nodes[1].getnewaddress() block_hash = self.generatetoaddress( self.nodes[0], 1, coinbase_addr, sync_fun=lambda: self.sync_all(self.nodes[0:3]))[0] coinbase_txid = self.nodes[0].getblock(block_hash)['tx'][0] # Check that the txid and balance is found by node1 self.nodes[1].gettransaction(coinbase_txid) # check if wallet or blockchain maintenance changes the balance self.sync_all(self.nodes[0:3]) blocks = self.generate( self.nodes[0], 2, sync_fun=lambda: self.sync_all(self.nodes[0:3])) balance_nodes = [self.nodes[i].getbalance() for i in range(3)] block_count = self.nodes[0].getblockcount() # Check modes: # - True: unicode escaped as \u.... # - False: unicode directly as UTF-8 for mode in [True, False]: self.nodes[0].rpc.ensure_ascii = mode # unicode check: Basic Multilingual Plane, Supplementary Plane # respectively for label in [u'рыба', u'𝅘𝅥𝅯']: addr = self.nodes[0].getnewaddress() self.nodes[0].setlabel(addr, label) test_address(self.nodes[0], addr, labels=[label]) assert label in self.nodes[0].listlabels() # restore to default self.nodes[0].rpc.ensure_ascii = True # maintenance tests maintenance = [ '-rescan', '-reindex', ] chainlimit = 6 for m in maintenance: self.log.info(f"check {m}") self.stop_nodes() # set lower ancestor limit for later self.start_node( 0, self.extra_args[0] + [m, f"-limitancestorcount={str(chainlimit)}"]) self.start_node( 1, self.extra_args[1] + [m, f"-limitancestorcount={str(chainlimit)}"]) self.start_node( 2, self.extra_args[2] + [m, f"-limitancestorcount={str(chainlimit)}"]) if m == '-reindex': # reindex will leave rpc warm up "early"; Wait for it to finish self.wait_until( lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)]) assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)]) # Exercise listsinceblock with the last two blocks coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0]) assert_equal(coinbase_tx_1["lastblock"], blocks[1]) assert_equal(len(coinbase_tx_1["transactions"]), 1) assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1]) assert_equal(len(self.nodes[0].listsinceblock( blocks[1])["transactions"]), 0) # ==Check that wallet prefers to use coins that don't exceed mempool li # Get all non-zero utxos together chain_addrs = [self.nodes[0].getnewaddress( ), self.nodes[0].getnewaddress()] singletxid = self.nodes[0].sendtoaddress( chain_addrs[0], self.nodes[0].getbalance(), "", "", True) self.generate(self.nodes[0], 1, sync_fun=self.no_op) node0_balance = self.nodes[0].getbalance() # Split into two chains rawtx = self.nodes[0].createrawtransaction([{"txid": singletxid, "vout": 0}], { chain_addrs[0]: node0_balance / 2 - Decimal('10000'), chain_addrs[1]: node0_balance / 2 - Decimal('10000')}) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) singletxid = self.nodes[0].sendrawtransaction( hexstring=signedtx["hex"], maxfeerate=0) self.generate(self.nodes[0], 1, sync_fun=self.no_op) # Make a long chain of unconfirmed payments without hitting mempool limit # Each tx we make leaves only one output of change on a chain 1 longer # Since the amount to send is always much less than the outputs, we only ever need one output # So we should be able to generate exactly chainlimit txs for each # original output sending_addr = self.nodes[1].getnewaddress() txid_list = [] for 
_ in range(chainlimit * 2): txid_list.append(self.nodes[0].sendtoaddress( sending_addr, Decimal('10000'))) assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2) assert_equal(len(txid_list), chainlimit * 2) # Without walletrejectlongchains, we will still generate a txid # The tx will be stored in the wallet but not accepted to the mempool extra_txid = self.nodes[0].sendtoaddress( sending_addr, Decimal('10000')) assert extra_txid not in self.nodes[0].getrawmempool() assert extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()] self.nodes[0].abandontransaction(extra_txid) total_txs = len(self.nodes[0].listtransactions("*", 99999)) # Try with walletrejectlongchains # Double chain limit but require combining inputs, so we pass # SelectCoinsMinConf self.stop_node(0) self.start_node(0, self.extra_args[0] + ["-walletrejectlongchains", f"-limitancestorcount={str(2 * chainlimit)}"]) # wait until the wallet has submitted all transactions to the mempool self.wait_until( lambda: len(self.nodes[0].getrawmempool()) == chainlimit * 2) # Prevent potential race condition when calling wallet RPCs right after # restart self.nodes[0].syncwithvalidationinterfacequeue() node0_balance = self.nodes[0].getbalance() # With walletrejectlongchains we will not create the tx and store it in # our wallet. assert_raises_rpc_error(-6, "Transaction has too long of a mempool chain", self.nodes[0].sendtoaddress, sending_addr, node0_balance - Decimal('10000')) # Verify nothing new in wallet assert_equal(total_txs, len( self.nodes[0].listtransactions("*", 99999))) # Test getaddressinfo on external address. Note that these addresses # are taken from disablewallet.py assert_raises_rpc_error(-5, "Invalid address", self.nodes[0].getaddressinfo, "3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy") address_info = self.nodes[0].getaddressinfo( "mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ") assert_equal(address_info['address'], "ecregtest:qp8rs4qyd3aazk22eyzwg7fmdfzmxm02pyprkfhvm4") assert_equal(address_info["scriptPubKey"], "76a9144e3854046c7bd1594ac904e4793b6a45b36dea0988ac") assert not address_info["ismine"] assert not address_info["iswatchonly"] assert not address_info["isscript"] assert not address_info["ischange"] # Test getaddressinfo 'ischange' field on change address. self.generate(self.nodes[0], 1, sync_fun=self.no_op) destination = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(destination, 123000) tx = self.nodes[0].gettransaction(txid=txid, verbose=True)['decoded'] output_addresses = [vout['scriptPubKey']['addresses'][0] for vout in tx["vout"]] assert len(output_addresses) > 1 for address in output_addresses: ischange = self.nodes[0].getaddressinfo(address)['ischange'] assert_equal(ischange, address != destination) if ischange: change = address self.nodes[0].setlabel(change, 'foobar') assert_equal(self.nodes[0].getaddressinfo(change)['ischange'], False) # Test gettransaction response with different arguments. 
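# Iterating a dict yields its keys, so for the gettransaction responses checked
# below, set(tx), set([*tx]) and set(tx.keys()) are all the same key set; that
# is why the simpler set(tx) spelling is equivalent to the old set([*tx]) form:
tx_like = {"amount": -123000, "confirmations": 1, "txid": "ab" * 32}
assert set(tx_like) == set([*tx_like]) == set(tx_like.keys()) == {
    "amount", "confirmations", "txid"}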
self.log.info( "Testing gettransaction response with different arguments...") self.nodes[0].setlabel(change, 'baz') baz = self.nodes[0].listtransactions(label="baz", count=1)[0] expected_receive_vout = {"label": "baz", "address": baz["address"], "amount": baz["amount"], "category": baz["category"], "vout": baz["vout"]} expected_fields = frozenset({'amount', 'confirmations', 'details', 'fee', 'hex', 'time', 'timereceived', 'trusted', 'txid', 'walletconflicts'}) verbose_field = "decoded" expected_verbose_fields = expected_fields | {verbose_field} self.log.debug("Testing gettransaction response without verbose") tx = self.nodes[0].gettransaction(txid=txid) - assert_equal(set([*tx]), expected_fields) + assert_equal(set(tx), expected_fields) assert_array_result( tx["details"], { "category": "receive"}, expected_receive_vout) self.log.debug( "Testing gettransaction response with verbose set to False") tx = self.nodes[0].gettransaction(txid=txid, verbose=False) - assert_equal(set([*tx]), expected_fields) + assert_equal(set(tx), expected_fields) assert_array_result( tx["details"], { "category": "receive"}, expected_receive_vout) self.log.debug( "Testing gettransaction response with verbose set to True") tx = self.nodes[0].gettransaction(txid=txid, verbose=True) - assert_equal(set([*tx]), expected_verbose_fields) + assert_equal(set(tx), expected_verbose_fields) assert_array_result( tx["details"], { "category": "receive"}, expected_receive_vout) assert_equal( tx[verbose_field], self.nodes[0].decoderawtransaction( tx["hex"])) if __name__ == '__main__': WalletTest().main() diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py index 8bb0bd6db..a50ef8511 100755 --- a/test/functional/wallet_labels.py +++ b/test/functional/wallet_labels.py @@ -1,190 +1,190 @@ #!/usr/bin/env python3 # Copyright (c) 2016-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test label RPCs. 
RPCs tested are: - getaddressesbylabel - listaddressgroupings - setlabel """ from collections import defaultdict from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error from test_framework.wallet_util import test_address class WalletLabelsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Check that there's no UTXO on the node node = self.nodes[0] assert_equal(len(node.listunspent()), 0) # Note each time we call generate, all generated coins go into # the same address, so we call twice to get two addresses w/50 each self.generatetoaddress(node, nblocks=1, address=node.getnewaddress( label='coinbase')) self.generatetoaddress(node, nblocks=101, address=node.getnewaddress( label='coinbase')) assert_equal(node.getbalance(), 100000000) # there should be 2 address groups # each with 1 address with a balance of 50 Bitcoins address_groups = node.listaddressgroupings() assert_equal(len(address_groups), 2) # the addresses aren't linked now, but will be after we send to the # common address linked_addresses = set() for address_group in address_groups: assert_equal(len(address_group), 1) assert_equal(len(address_group[0]), 3) assert_equal(address_group[0][1], 50000000) assert_equal(address_group[0][2], 'coinbase') linked_addresses.add(address_group[0][0]) # send 50 from each address to a third address not in this wallet common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr" node.sendmany( amounts={common_address: 100000000}, subtractfeefrom=[common_address], minconf=1, ) # there should be 1 address group, with the previously # unlinked addresses now linked (they both have 0 balance) address_groups = node.listaddressgroupings() assert_equal(len(address_groups), 1) assert_equal(len(address_groups[0]), 2) - assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses) + assert_equal({a[0] for a in address_groups[0]}, linked_addresses) assert_equal([a[1] for a in address_groups[0]], [0, 0]) self.generate(node, 1) # we want to reset so that the "" label has what's expected. # otherwise we're off by exactly the fee amount as that's mined # and matures in the next 100 blocks amount_to_send = 1000000 # Create labels and make sure subsequent label API calls # recognize the label/address associations. labels = [Label(name) for name in ("a", "b", "c", "d", "e")] for label in labels: address = node.getnewaddress(label.name) label.add_receive_address(address) label.verify(node) # Check all labels are returned by listlabels. assert_equal(node.listlabels(), sorted( ['coinbase'] + [label.name for label in labels])) # Send a transaction to each label. for label in labels: node.sendtoaddress(label.addresses[0], amount_to_send) label.verify(node) # Check the amounts received. self.generate(node, 1) for label in labels: assert_equal( node.getreceivedbyaddress(label.addresses[0]), amount_to_send) assert_equal(node.getreceivedbylabel(label.name), amount_to_send) for i, label in enumerate(labels): to_label = labels[(i + 1) % len(labels)] node.sendtoaddress(to_label.addresses[0], amount_to_send) self.generate(node, 1) for label in labels: address = node.getnewaddress(label.name) label.add_receive_address(address) label.verify(node) assert_equal(node.getreceivedbylabel(label.name), 2000000) label.verify(node) self.generate(node, 101) # Check that setlabel can assign a label to a new unused address. 
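# The Label helper defined further down keeps its address-to-purpose map as
# defaultdict(lambda: "receive"), so any address it has not explicitly marked
# reports the "receive" purpose. In miniature (addresses are placeholders):
from collections import defaultdict

purpose = defaultdict(lambda: "receive")  # unrecorded addresses read back as "receive"
purpose["addr_multisig"] = "send"         # only explicitly marked addresses differ

assert purpose["addr_fresh"] == "receive"
assert purpose["addr_multisig"] == "send"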
for label in labels: address = node.getnewaddress() node.setlabel(address, label.name) label.add_address(address) label.verify(node) assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "") # Check that addmultisigaddress can assign labels. if not self.options.descriptors: for label in labels: addresses = [] for _ in range(10): addresses.append(node.getnewaddress()) multisig_address = node.addmultisigaddress( 5, addresses, label.name)['address'] label.add_address(multisig_address) label.purpose[multisig_address] = "send" label.verify(node) self.generate(node, 101) # Check that setlabel can change the label of an address from a # different label. change_label(node, labels[0].addresses[0], labels[0], labels[1]) # Check that setlabel can set the label of an address already # in the label. This is a no-op. change_label(node, labels[2].addresses[0], labels[2], labels[2]) class Label: def __init__(self, name): # Label name self.name = name # Current receiving address associated with this label. self.receive_address = None # List of all addresses assigned with this label self.addresses = [] # Map of address to address purpose self.purpose = defaultdict(lambda: "receive") def add_address(self, address): assert_equal(address not in self.addresses, True) self.addresses.append(address) def add_receive_address(self, address): self.add_address(address) def verify(self, node): if self.receive_address is not None: assert self.receive_address in self.addresses for address in self.addresses: test_address(node, address, labels=[self.name]) assert self.name in node.listlabels() assert_equal( node.getaddressesbylabel(self.name), {address: {"purpose": self.purpose[address]} for address in self.addresses}) def change_label(node, address, old_label, new_label): assert_equal(address in old_label.addresses, True) node.setlabel(address, new_label.name) old_label.addresses.remove(address) new_label.add_address(address) old_label.verify(node) new_label.verify(node) if __name__ == '__main__': WalletLabelsTest().main() diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index edb56cbed..780de78f9 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -1,485 +1,483 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test multiwallet. Verify that a bitcoind node can load multiple wallet files """ import os import shutil import time from decimal import Decimal from threading import Thread from test_framework.authproxy import JSONRPCException from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ErrorMatch from test_framework.util import assert_equal, assert_raises_rpc_error, get_rpc_proxy got_loading_error = False def test_load_unload(node, name, timeout=60.): global got_loading_error t0 = time.time() while time.time() - t0 < timeout and not got_loading_error: try: node.loadwallet(name) node.unloadwallet(name) except JSONRPCException as e: if e.error['code'] == - \ 4 and 'Wallet already being loading' in e.error['message']: got_loading_error = True return # Add a small sleep to avoid CPU exhaustion in the unlikely case # the race never happens. 
time.sleep(0.001) class MultiWalletTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def add_options(self, parser): parser.add_argument( '--data_wallets_dir', default=os.path.join( os.path.dirname( os.path.realpath(__file__)), 'data/wallets/'), help='Test data with wallet directories (default: %(default)s)', ) def run_test(self): node = self.nodes[0] def data_dir(*p): return os.path.join(node.datadir, self.chain, *p) def wallet_dir(*p): return data_dir('wallets', *p) def wallet(name): return node.get_wallet_rpc(name) def wallet_file(name): if os.path.isdir(wallet_dir(name)): return wallet_dir(name, self.wallet_data_filename) return wallet_dir(name) assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]}) # check wallet.dat is created self.stop_nodes() assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True) # create symlink to verify wallet directory path can be referenced # through symlink if os.name != 'nt': os.mkdir(wallet_dir('w7')) os.symlink('w7', wallet_dir('w7_symlink')) # rename wallet.dat to make sure plain wallet file paths (as opposed to # directory paths) can be loaded os.rename(wallet_dir(self.default_wallet_name, self.wallet_data_filename), wallet_dir("w8")) # create another dummy wallet for use in testing backups later self.start_node( 0, ["-nowallet", f"-wallet={self.default_wallet_name}"]) self.stop_nodes() empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat') os.rename(wallet_dir(self.default_wallet_name, self.wallet_data_filename), empty_wallet) # restart node with a mix of wallet names: # w1, w2, w3 - to verify new wallets created when non-existing paths specified # w - to verify wallet name matching works when one wallet path is prefix of another # sub/w5 - to verify relative wallet path is created correctly # extern/w6 - to verify absolute wallet path is created correctly # w7_symlink - to verify symlinked wallet path is initialized correctly # w8 - to verify existing wallet file is loaded correctly # '' - to verify default wallet file is created correctly wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', self.default_wallet_name] if os.name == 'nt': wallet_names.remove('w7_symlink') extra_args = ['-nowallet'] + \ [f'-wallet={n}' for n in wallet_names] self.start_node(0, extra_args) assert_equal( - sorted(map(lambda w: w['name'], - self.nodes[0].listwalletdir()['wallets'])), + sorted(w['name'] for w in self.nodes[0].listwalletdir()['wallets']), [self.default_wallet_name, os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8']) assert_equal(set(node.listwallets()), set(wallet_names)) # check that all requested wallets were created self.stop_node(0) for wallet_name in wallet_names: assert_equal(os.path.isfile(wallet_file(wallet_name)), True) # should not initialize if wallet path can't be created exp_stderr = "boost::filesystem::create_director" self.nodes[0].assert_start_raises_init_error( ['-wallet=w8/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) self.nodes[0].assert_start_raises_init_error( ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist') self.nodes[0].assert_start_raises_init_error( ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir()) 
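# data_dir/wallet_dir/wallet_file above are just nested os.path.join wrappers
# rooted at the node's datadir and chain. A standalone sketch of how they
# compose (locations below are made up; POSIX separators assumed):
import os

DATADIR, CHAIN = "/tmp/node0", "regtest"


def data_dir(*parts):
    return os.path.join(DATADIR, CHAIN, *parts)


def wallet_dir(*parts):
    return data_dir("wallets", *parts)


assert data_dir() == "/tmp/node0/regtest"
assert wallet_dir("w1", "wallet.dat") == "/tmp/node0/regtest/wallets/w1/wallet.dat"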
self.nodes[0].assert_start_raises_init_error( ['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir()) self.start_node(0, ['-wallet=w1', '-wallet=w1']) self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.') # should not initialize if one wallet is a copy of another shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy')) exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)" self.nodes[0].assert_start_raises_init_error( ['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) # should not initialize if wallet file is a symlink if os.name != 'nt': os.symlink('w8', wallet_dir('w8_symlink')) self.nodes[0].assert_start_raises_init_error( ['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX) # should not initialize if the specified walletdir does not exist self.nodes[0].assert_start_raises_init_error( ['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist') # should not initialize if the specified walletdir is not a directory not_a_dir = wallet_dir('notadir') open(not_a_dir, 'a', encoding="utf8").close() self.nodes[0].assert_start_raises_init_error( [f"-walletdir={not_a_dir}"], f"Error: Specified -walletdir \"{not_a_dir}\" is not a directory") # if wallets/ doesn't exist, datadir should be the default wallet dir wallet_dir2 = data_dir('walletdir') os.rename(wallet_dir(), wallet_dir2) self.start_node(0, ['-nowallet', '-wallet=w4', '-wallet=w5']) assert_equal(set(node.listwallets()), {"w4", "w5"}) w5 = wallet("w5") self.generatetoaddress( node, nblocks=1, address=w5.getnewaddress(), sync_fun=self.no_op) # now if wallets/ exists again, but the rootdir is specified as the # walletdir, w4 and w5 should still be loaded os.rename(wallet_dir2, wallet_dir()) self.restart_node(0, ['-nowallet', '-wallet=w4', '-wallet=w5', f"-walletdir={data_dir()}"]) assert_equal(set(node.listwallets()), {"w4", "w5"}) w5 = wallet("w5") w5_info = w5.getwalletinfo() assert_equal(w5_info['immature_balance'], 50000000) competing_wallet_dir = os.path.join( self.options.tmpdir, 'competing_walletdir') os.mkdir(competing_wallet_dir) self.restart_node(0, [f"-walletdir={competing_wallet_dir}"]) exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\"!" 
self.nodes[1].assert_start_raises_init_error( [f"-walletdir={competing_wallet_dir}"], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) self.restart_node(0, extra_args) - assert_equal(sorted(map(lambda w: w['name'], - self.nodes[0].listwalletdir()['wallets'])), - [self.default_wallet_name, os.path.join('sub', 'w5'), 'w', - 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy']) + assert_equal( + sorted(w['name'] for w in self.nodes[0].listwalletdir()['wallets']), + [self.default_wallet_name, os.path.join('sub', 'w5'), 'w', 'w1', 'w2', + 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy']) wallets = [wallet(w) for w in wallet_names] wallet_bad = wallet("bad") # check wallet names and balances self.generatetoaddress( node, nblocks=1, address=wallets[0].getnewaddress(), sync_fun=self.no_op) for wallet_name, wallet in zip(wallet_names, wallets): info = wallet.getwalletinfo() assert_equal(info['immature_balance'], 50000000 if wallet is wallets[0] else 0) assert_equal(info['walletname'], wallet_name) # accessing invalid wallet fails assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo) # accessing wallet RPC without using wallet endpoint fails assert_raises_rpc_error(-19, "Wallet file not specified (must request wallet RPC through /wallet/ uri-path).", node.getwalletinfo) w1, w2, w3, w4, *_ = wallets self.generatetoaddress( node, nblocks=101, address=w1.getnewaddress(), sync_fun=self.no_op) assert_equal(w1.getbalance(), 100000000) assert_equal(w2.getbalance(), 0) assert_equal(w3.getbalance(), 0) assert_equal(w4.getbalance(), 0) w1.sendtoaddress(w2.getnewaddress(), 1000000) w1.sendtoaddress(w3.getnewaddress(), 2000000) w1.sendtoaddress(w4.getnewaddress(), 3000000) self.generatetoaddress( node, nblocks=1, address=w1.getnewaddress(), sync_fun=self.no_op) assert_equal(w2.getbalance(), 1000000) assert_equal(w3.getbalance(), 2000000) assert_equal(w4.getbalance(), 3000000) batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()]) assert_equal(batch[0]["result"]["chain"], self.chain) assert_equal(batch[1]["result"]["walletname"], "w1") self.log.info('Check for per-wallet settxfee call') assert_equal(w1.getwalletinfo()['paytxfee'], 0) assert_equal(w2.getwalletinfo()['paytxfee'], 0) w2.settxfee(1000) assert_equal(w1.getwalletinfo()['paytxfee'], 0) assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('1000.00')) self.log.info("Test dynamic wallet loading") self.restart_node(0, ['-nowallet']) assert_equal(node.listwallets(), []) assert_raises_rpc_error( -18, "No wallet is loaded. Load a wallet using loadwallet or create a new" " one with createwallet. 
(Note: A default wallet is no longer " "automatically created)", node.getwalletinfo ) self.log.info("Load first wallet") loadwallet_name = node.loadwallet(wallet_names[0]) assert_equal(loadwallet_name['name'], wallet_names[0]) assert_equal(node.listwallets(), wallet_names[0:1]) node.getwalletinfo() w1 = node.get_wallet_rpc(wallet_names[0]) w1.getwalletinfo() self.log.info("Load second wallet") loadwallet_name = node.loadwallet(wallet_names[1]) assert_equal(loadwallet_name['name'], wallet_names[1]) assert_equal(node.listwallets(), wallet_names[0:2]) assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo) w2 = node.get_wallet_rpc(wallet_names[1]) w2.getwalletinfo() self.log.info("Concurrent wallet loading") threads = [] for _ in range(3): n = node.cli if self.options.usecli else get_rpc_proxy( node.url, 1, timeout=600, coveragedir=node.coverage_dir) t = Thread( target=test_load_unload, args=( n, wallet_names[2], 20 * self.rpc_timeout)) t.start() threads.append(t) for t in threads: t.join() global got_loading_error assert_equal(got_loading_error, True) self.log.info("Load remaining wallets") for wallet_name in wallet_names[2:]: loadwallet_name = self.nodes[0].loadwallet(wallet_name) assert_equal(loadwallet_name['name'], wallet_name) assert_equal(set(self.nodes[0].listwallets()), set(wallet_names)) # Fail to load if wallet doesn't exist path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets") assert_raises_rpc_error( -18, f"Wallet file verification failed. Failed to load database path '{path}'. " "Path does not exist.", self.nodes[0].loadwallet, 'wallets') # Fail to load duplicate wallets path = os.path.join( self.options.tmpdir, "node0", "regtest", "wallets", "w1", self.wallet_data_filename) assert_raises_rpc_error( -4, "Wallet file verification failed. Refusing to load database. " f"Data file '{path}' is already loaded.", self.nodes[0].loadwallet, wallet_names[0]) # Fail to load duplicate wallets by different ways (directory and # filepath) path = os.path.join( self.options.tmpdir, "node0", "regtest", "wallets", self.wallet_data_filename) assert_raises_rpc_error( -4, "Wallet file verification failed. Refusing to load database. " f"Data file '{path}' is already loaded.", self.nodes[0].loadwallet, self.wallet_data_filename) # Fail to load if one wallet is a copy of another assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy') # Fail to load if one wallet is a copy of another. # Test this twice to make sure that we don't re-introduce # https://github.com/bitcoin/bitcoin/issues/14304 assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy') # Fail to load if wallet file is a symlink if os.name != 'nt': assert_raises_rpc_error( -4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink') # Fail to load if a directory is specified that doesn't contain a # wallet os.mkdir(wallet_dir('empty_wallet_dir')) path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir") assert_raises_rpc_error( -18, f"Wallet file verification failed. Failed to load database path '{path}'. " "Data is not in recognized format.", self.nodes[0].loadwallet, 'empty_wallet_dir') self.log.info("Test dynamic wallet creation.") # Fail to create a wallet if it already exists. 
        path = os.path.join(self.options.tmpdir, "node0", "regtest",
                            "wallets", "w2")
        assert_raises_rpc_error(
            -4,
            f"Failed to create database path '{path}'. Database already exists.",
            self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')

        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)

        assert new_wallet_name in self.nodes[0].listwallets()

        self.log.info("Test dynamic wallet unloading")

        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected",
                                self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18,
                                "Requested wallet does not exist or is not loaded",
                                self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18,
                                "Requested wallet does not exist or is not loaded",
                                node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "Cannot unload the requested wallet",
                                w1.unloadwallet, "w2"),

        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()

        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(
            -18,
            "No wallet is loaded. Load a wallet using loadwallet or create a new"
            " one with createwallet. (Note: A default wallet is no longer "
            "automatically created)",
            self.nodes[0].getwalletinfo
        )

        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')
-        assert_equal(sorted(map(lambda w: w['name'],
-                                self.nodes[0].listwalletdir()['wallets'])),
-                     [self.default_wallet_name, os.path.join('sub', 'w5'), 'w',
-                      'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy',
-                      'w9'])
+        assert_equal(
+            sorted(w['name'] for w in self.nodes[0].listwalletdir()['wallets']),
+            [self.default_wallet_name, os.path.join('sub', 'w5'), 'w', 'w1', 'w2',
+             'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy', 'w9'])

        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            rpc.backupwallet(backup)
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            self.nodes[0].unloadwallet(wallet_name)
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)

        # Test .walletlock file is closed
        self.start_node(1)
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        assert_raises_rpc_error(-4,
                                "Error initializing wallet database environment",
                                self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)


if __name__ == '__main__':
    MultiWalletTest().main()
diff --git a/test/functional/wallet_startup.py b/test/functional/wallet_startup.py
index 33ee6418a..a2d9cf02b 100755
--- a/test/functional/wallet_startup.py
+++ b/test/functional/wallet_startup.py
@@ -1,59 +1,59 @@
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet load on startup.
Verify that a bitcoind node can maintain list of wallets loading on startup
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class WalletStartupTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = True

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def setup_nodes(self):
        self.add_nodes(self.num_nodes)
        self.start_nodes()

    def run_test(self):
        self.log.info('Should start without any wallets')
        assert_equal(self.nodes[0].listwallets(), [])
        assert_equal(self.nodes[0].listwalletdir(), {'wallets': []})

        self.log.info(
            'New default wallet should load by default when there are no other wallets')
        self.nodes[0].createwallet(wallet_name='', load_on_startup=False)
        self.restart_node(0)
        assert_equal(self.nodes[0].listwallets(), [''])

        self.log.info('Test load on startup behavior')
        self.nodes[0].createwallet(wallet_name='w0', load_on_startup=True)
        self.nodes[0].createwallet(wallet_name='w1', load_on_startup=False)
        self.nodes[0].createwallet(wallet_name='w2', load_on_startup=True)
        self.nodes[0].createwallet(wallet_name='w3', load_on_startup=False)
        self.nodes[0].createwallet(wallet_name='w4', load_on_startup=False)
        self.nodes[0].unloadwallet(wallet_name='w0', load_on_startup=False)
        self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
        self.nodes[0].loadwallet(filename='w4', load_on_startup=True)
        assert_equal(set(self.nodes[0].listwallets()),
-                     set(('', 'w1', 'w2', 'w3', 'w4')))
+                     {'', 'w1', 'w2', 'w3', 'w4'})
        self.restart_node(0)
-        assert_equal(set(self.nodes[0].listwallets()), set(('', 'w2', 'w4')))
+        assert_equal(set(self.nodes[0].listwallets()), {'', 'w2', 'w4'})
        self.nodes[0].unloadwallet(wallet_name='', load_on_startup=False)
        self.nodes[0].unloadwallet(wallet_name='w4', load_on_startup=False)
        self.nodes[0].loadwallet(filename='w3', load_on_startup=True)
        self.nodes[0].loadwallet(filename='')
        self.restart_node(0)
-        assert_equal(set(self.nodes[0].listwallets()), set(('w2', 'w3')))
+        assert_equal(set(self.nodes[0].listwallets()), {'w2', 'w3'})


if __name__ == '__main__':
    WalletStartupTest().main()
diff --git a/test/lint/check-doc.py b/test/lint/check-doc.py
index c442ab157..e8ab2bb3a 100755
--- a/test/lint/check-doc.py
+++ b/test/lint/check-doc.py
@@ -1,103 +1,103 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Copyright (c) 2019 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''

import glob
import re
from pprint import PrettyPrinter
from subprocess import check_output

TOP_LEVEL = 'git rev-parse --show-toplevel'
FOLDERS_SRC = ['/src/**/', '/chronik/**/']
FOLDERS_TEST = ['/src/**/test/', '/chronik/test/**/']
EXTENSIONS = ["*.c", "*.h", "*.cpp", "*.cc", "*.hpp"]
REGEX_ARG = r'(?:ForceSet|SoftSet|Get|Is)(?:Bool|Int)?Args?(?:Set)?\(\s*"(-[^"]+)"'
REGEX_DOC = r'AddArg\(\s*"(-[^"=]+?)(?:=|")'

# list false positive unknows arguments
-SET_FALSE_POSITIVE_UNKNOWNS = set([
+SET_FALSE_POSITIVE_UNKNOWNS = {
    '-includeconf',
    '-regtest',
    '-testnet',
    '-zmqpubhashblock',
    '-zmqpubhashtx',
    '-zmqpubrawblock',
    '-zmqpubrawtx',
    '-zmqpubhashblockhwm',
    '-zmqpubhashtxhwm',
    '-zmqpubrawblockhwm',
    '-zmqpubrawtxhwm',
    '-zmqpubsequence',
    '-zmqpubsequencehwm',
-])
+}

# list false positive undocumented arguments
-SET_FALSE_POSITIVE_UNDOCUMENTED = set([
+SET_FALSE_POSITIVE_UNDOCUMENTED = {
    '-help',
    '-h',
    '-avalanchepreconsensus',
    '-dbcrashratio',
    '-enableminerfund',
    '-forcecompactdb',
    '-maxaddrtosend',
    '-parkdeepreorg',
    '-automaticunparking',
    # Removed arguments that now just print a helpful error message
    '-zapwallettxes',
    '-replayprotectionactivationtime',
    # Remove after May 2023 upgrade
    '-wellingtonactivationtime',
-])
+}


def main():
    top_level = check_output(TOP_LEVEL, shell=True, universal_newlines=True,
                             encoding='utf8').strip()
    source_files = []
    test_files = []

    for extension in EXTENSIONS:
        for folder_src in FOLDERS_SRC:
            source_files += glob.glob(top_level + folder_src + extension,
                                      recursive=True)
        for folder_test in FOLDERS_TEST:
            test_files += glob.glob(top_level + folder_test + extension,
                                    recursive=True)

    files = set(source_files) - set(test_files)

    args_used = set()
    args_docd = set()
    for file in files:
        with open(file, 'r', encoding='utf-8') as f:
            content = f.read()
            args_used |= set(re.findall(re.compile(REGEX_ARG), content))
            args_docd |= set(re.findall(re.compile(REGEX_DOC), content))

    args_used |= SET_FALSE_POSITIVE_UNKNOWNS
    args_docd |= SET_FALSE_POSITIVE_UNDOCUMENTED
    args_need_doc = args_used - args_docd
    args_unknown = args_docd - args_used

    pp = PrettyPrinter()
    print(f"Args used : {len(args_used)}")
    print(f"Args documented : {len(args_docd)}")
    print(f"Args undocumented: {len(args_need_doc)}")
    pp.pprint(args_need_doc)
    print(f"Args unknown : {len(args_unknown)}")
    pp.pprint(args_unknown)


if __name__ == "__main__":
    main()
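
The Python hunks above all follow the rewrite pattern that the newly required flake8-comprehensions plugin pushes for: a redundant set() call around a list or tuple literal becomes a set literal, and map() with a lambda used only to extract a field becomes a generator expression. The snippet below is a minimal sketch of both rewrites, not part of the patch; the wallets list is a hypothetical stand-in for the listwalletdir()['wallets'] result.

    # Hypothetical stand-in for self.nodes[0].listwalletdir()['wallets']
    wallets = [{'name': 'w2'}, {'name': 'w1'}, {'name': 'w3'}]

    # Before: redundant set() call around a literal (flagged by flake8-comprehensions, e.g. C405)
    names = set(['w1', 'w2', 'w3'])
    # After: a set literal builds the same value without the throwaway list
    names = {'w1', 'w2', 'w3'}

    # Before: map() with a lambda only to pull out one key (e.g. C417)
    sorted_names = sorted(map(lambda w: w['name'], wallets))
    # After: a generator expression drops the lambda and reads directly
    sorted_names = sorted(w['name'] for w in wallets)

    assert names == {'w1', 'w2', 'w3'}
    assert sorted_names == ['w1', 'w2', 'w3']

Both forms are equivalent at runtime; the literal and generator versions simply skip the intermediate container and the per-element lambda call, which is why the linter prefers them.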