diff --git a/.arclint b/.arclint index c9f6dc82d..6951f3183 100644 --- a/.arclint +++ b/.arclint @@ -1,316 +1,320 @@ { "linters": { "generated": { "type": "generated" }, "clang-format": { "type": "clang-format", "version": ">=12.0", "bin": ["clang-format-12", "clang-format"], "include": "(^(src|chronik)/.*\\.(h|c|cpp|mm)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "autopep8": { "type": "autopep8", "version": ">=1.3.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ], "flags": [ "--aggressive", "--ignore=W503,W504", "--max-line-length=88" ] }, "flake8": { "type": "flake8", "version": ">=3.0", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ], "flags": [ "--ignore=E303,E305,E501,E704,W503,W504" ] }, "lint-format-strings": { "type": "lint-format-strings", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/test/fuzz/strprintf.cpp$)" ] }, "check-doc": { "type": "check-doc", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)" }, "lint-tests": { "type": "lint-tests", "include": "(^src/(seeder/|rpc/|wallet/)?test/.*\\.(cpp)$)" }, "phpcs": { "type": "phpcs", "include": "(\\.php$)", "exclude": [ "(^arcanist/__phutil_library_.+\\.php$)" ], "phpcs.standard": "arcanist/phpcs.xml" }, "lint-locale-dependence": { "type": "lint-locale-dependence", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/))", "(^src/bench/nanobench.h$)" ] }, "lint-cheader": { "type": "lint-cheader", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "spelling": { "type": "spelling", "exclude": [ "(^build-aux/m4/)", "(^depends/)", "(^doc/release-notes/)", "(^contrib/gitian-builder/)", "(^src/(qt/locale|secp256k1|univalue|leveldb)/)", "(^test/lint/dictionary/)", "(package-lock.json)" ], "spelling.dictionaries": [ "test/lint/dictionary/english.json" ] }, "lint-assert-with-side-effects": { "type": "lint-assert-with-side-effects", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-quotes": { "type": "lint-include-quotes", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-guard": { "type": "lint-include-guard", "include": "(^(src|chronik)/.*\\.h$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/tinyformat.h$)" ] }, "lint-include-source": { "type": "lint-include-source", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-std-chrono": { "type": "lint-std-chrono", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-stdint": { "type": "lint-stdint", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/assumptions.h$)" ] }, "lint-source-filename": { "type": "lint-source-filename", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-boost-dependencies": { "type": "lint-boost-dependencies", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-python-encoding": { 
"type": "lint-python-encoding", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-python-shebang": { "type": "lint-python-shebang", "include": "(\\.py$)", "exclude": [ "(__init__\\.py$)", "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-bash-shebang": { "type": "lint-bash-shebang", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)" ] }, "shellcheck": { "type": "shellcheck", "version": ">=0.7.0", "flags": [ "--external-sources", "--source-path=SCRIPTDIR" ], "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)" ] }, "lint-shell-locale": { "type": "lint-shell-locale", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)", "(^cmake/utils/log-and-print-on-failure.sh)" ] }, "lint-cpp-void-parameters": { "type": "lint-cpp-void-parameters", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/glibc_compat.cpp$)" ] }, "lint-logs": { "type": "lint-logs", "include": "(^(src|chronik)/.*\\.(h|cpp|rs)$)" }, "lint-qt": { "type": "lint-qt", "include": "(^src/qt/.*\\.(h|cpp)$)", "exclude": [ "(^src/qt/(locale|forms|res)/)" ] }, "lint-doxygen": { "type": "lint-doxygen", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-whitespace": { "type": "lint-whitespace", "include": "(\\.(ac|am|cmake|conf|in|include|json|m4|md|openrc|php|pl|rs|sh|txt|yml)$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "yamllint": { "type": "yamllint", "include": "(\\.(yml|yaml)$)", "exclude": "(^src/(secp256k1|univalue|leveldb)/)" }, "lint-check-nonfatal": { "type": "lint-check-nonfatal", "include": [ "(^src/rpc/.*\\.(h|c|cpp)$)", "(^src/wallet/rpc*.*\\.(h|c|cpp)$)" ], "exclude": "(^src/rpc/server.cpp)" }, "lint-markdown": { "type": "lint-markdown", "include": [ "(\\.md$)" ], "exclude": "(^contrib/gitian-builder/)" }, "lint-python-mypy": { "type": "lint-python-mypy", "version": ">=0.780", "include": "(\\.py$)", - "exclude": "(^contrib/)", + "exclude": [ + "(^contrib/gitian-builder/)", + "(^contrib/apple-sdk-tools/)", + "(^contrib/macdeploy/)" + ], "flags": [ "--ignore-missing-imports" ] }, "lint-python-mutable-default": { "type": "lint-python-mutable-default", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "prettier": { "type": "prettier", "version":">=2.4.1", "include": "(^web/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)", "exclude": "(^web/.*/translations/.*\\.json$)" }, "lint-python-isort": { "type": "lint-python-isort", "version": ">=5.6.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "rustfmt": { "type": "rustfmt", "version": ">=1.5.1", "include": "(\\.rs$)" }, "eslint": { "type": "eslint", "version": ">=8.0.0", "include": [ "(cashtab/.*\\.js$)", "(apps/alias-server/.*\\.js$)", "(modules/ecashaddrjs/.*\\.js$)", "(apps/ecash-herald/.*\\.js$)" ] }, "lint-python-flynt": { "type": "lint-python-flynt", "version": ">=0.78", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] } } } diff --git a/contrib/devtools/circular-dependencies.py b/contrib/devtools/circular-dependencies.py index 36711103b..e6c2b5612 100755 --- 
a/contrib/devtools/circular-dependencies.py +++ b/contrib/devtools/circular-dependencies.py @@ -1,93 +1,94 @@ #!/usr/bin/env python3 # Copyright (c) 2018-2020 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import re import sys +from typing import Dict, List, Set MAPPING = { 'core_read.cpp': 'core_io.cpp', 'core_write.cpp': 'core_io.cpp', } # Directories with header-based modules, where the assumption that .cpp files # define functions and variables declared in corresponding .h files is # incorrect. HEADER_MODULE_PATHS = [ 'interfaces/' ] def module_name(path): if path in MAPPING: path = MAPPING[path] if any(path.startswith(dirpath) for dirpath in HEADER_MODULE_PATHS): return path if path.endswith(".h"): return path[:-2] if path.endswith(".c"): return path[:-2] if path.endswith(".cpp"): return path[:-4] return None files = {} -deps = {} +deps: Dict[str, Set[str]] = {} RE = re.compile("^#include <(.*)>") # Iterate over files, and create list of modules for arg in sys.argv[1:]: module = module_name(arg) if module is None: print(f"Ignoring file {arg} (does not constitute module)\n") else: files[arg] = module deps[module] = set() # Iterate again, and build list of direct dependencies for each module # TODO: implement support for multiple include directories for arg in sorted(files.keys()): module = files[arg] with open(arg, 'r', encoding="utf8") as f: for line in f: match = RE.match(line) if match: include = match.group(1) included_module = module_name(include) if included_module is not None and included_module in deps and included_module != module: deps[module].add(included_module) # Loop to find the shortest (remaining) circular dependency have_cycle = False while True: shortest_cycle = None for module in sorted(deps.keys()): # Build the transitive closure of dependencies of module - closure = {dep: [] for dep in deps[module]} + closure: Dict[str, List[str]] = {dep: [] for dep in deps[module]} while True: old_size = len(closure) old_closure_keys = sorted(closure.keys()) for src in old_closure_keys: for dep in deps[src]: if dep not in closure: closure[dep] = closure[src] + [src] if len(closure) == old_size: break # If module is in its own transitive closure, it's a circular # dependency; check if it is the shortest if module in closure and (shortest_cycle is None or len( closure[module]) + 1 < len(shortest_cycle)): shortest_cycle = [module] + closure[module] if shortest_cycle is None: break # We have the shortest circular dependency; report it module = shortest_cycle[0] print(f"Circular dependency: {' -> '.join(shortest_cycle + [module])}") # And then break the dependency to avoid repeating in other cycles deps[shortest_cycle[-1]] = deps[shortest_cycle[-1]] - set([module]) have_cycle = True sys.exit(1 if have_cycle else 0) diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py index 708913344..002513721 100755 --- a/contrib/linearize/linearize-data.py +++ b/contrib/linearize/linearize-data.py @@ -1,344 +1,345 @@ #!/usr/bin/env python3 # # linearize-data.py: Construct a linear, no-fork version of the chain. # # Copyright (c) 2013-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
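A note on the pattern that recurs throughout this diff: a container initialized empty, such as deps = {} above or settings = {} below, gives mypy nothing to infer element types from, so the type has to be declared at the binding site. A minimal sketch of the failure and the fix (names here are illustrative, not from these scripts):

    from typing import Dict, Set

    # Without the annotation, mypy reports: Need type annotation for "deps"
    deps: Dict[str, Set[str]] = {}

    # With it, every later access is checked against str keys / Set[str] values.
    deps["core_io"] = {"util"}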
# from __future__ import division, print_function import datetime import hashlib import os import os.path import re import struct import sys import time from binascii import unhexlify from collections import namedtuple +from typing import Any, Dict -settings = {} +settings: Dict[str, Any] = {} def hex_switchEndian(s): """ Switches the endianness of a hex string (in pairs of hex chars) """ pairList = [s[i:i + 2].encode() for i in range(0, len(s), 2)] return b''.join(pairList[::-1]).decode() def uint32(x): return x & 0xffffffff def bytereverse(x): return uint32((((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24))) def bufreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): word = struct.unpack('@I', in_buf[i:i + 4])[0] out_words.append(struct.pack('@I', bytereverse(word))) return b''.join(out_words) def wordreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): out_words.append(in_buf[i:i + 4]) out_words.reverse() return b''.join(out_words) def calc_hdr_hash(blk_hdr): hash1 = hashlib.sha256() hash1.update(blk_hdr) hash1_o = hash1.digest() hash2 = hashlib.sha256() hash2.update(hash1_o) hash2_o = hash2.digest() return hash2_o def calc_hash_str(blk_hdr): hash = calc_hdr_hash(blk_hdr) hash = bufreverse(hash) hash = wordreverse(hash) hash_str = hash.hex() return hash_str def get_blk_dt(blk_hdr): members = struct.unpack(" self.maxOutSz): self.outF.close() if self.setFileTime: os.utime(self.outFname, (int(time.time()), self.highTS)) self.outF = None self.outFname = None self.outFn = self.outFn + 1 self.outsz = 0 (blkDate, blkTS) = get_blk_dt(blk_hdr) if self.timestampSplit and (blkDate > self.lastDate): print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str) self.lastDate = blkDate if self.outF: self.outF.close() if self.setFileTime: os.utime(self.outFname, (int(time.time()), self.highTS)) self.outF = None self.outFname = None self.outFn = self.outFn + 1 self.outsz = 0 if not self.outF: if self.fileOutput: self.outFname = self.settings['output_file'] else: self.outFname = os.path.join( self.settings['output'], f"blk{self.outFn:05d}.dat") print("Output file " + self.outFname) self.outF = open(self.outFname, "wb") self.outF.write(inhdr) self.outF.write(blk_hdr) self.outF.write(rawblock) self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock) self.blkCountOut = self.blkCountOut + 1 if blkTS > self.highTS: self.highTS = blkTS if (self.blkCountOut % 1000) == 0: print('{} blocks scanned, {} blocks written (of {}, {:.1f}% complete)'.format( self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex))) def inFileName(self, fn): return os.path.join(self.settings['input'], f"blk{fn:05d}.dat") def fetchBlock(self, extent): '''Fetch block contents from disk given extents''' with open(self.inFileName(extent.fn), "rb") as f: f.seek(extent.offset) return f.read(extent.size) def copyOneBlock(self): '''Find the next block to be written in the input, and copy it to the output.''' extent = self.blockExtents.pop(self.blkCountOut) if self.blkCountOut in self.outOfOrderData: # If the data is cached, use it from memory and remove from the # cache rawblock = self.outOfOrderData.pop(self.blkCountOut) self.outOfOrderSize -= len(rawblock) else: # Otherwise look up data on disk rawblock = self.fetchBlock(extent) self.writeBlock(extent.inhdr, extent.blkhdr, rawblock) def run(self): while self.blkCountOut < len(self.blkindex): if not self.inF: fname = self.inFileName(self.inFn) print("Input file " + 
fname) try: self.inF = open(fname, "rb") except IOError: print("Premature end of block data") return inhdr = self.inF.read(8) if (not inhdr or (inhdr[0] == "\0")): self.inF.close() self.inF = None self.inFn = self.inFn + 1 continue inMagic = inhdr[:4] if (inMagic != self.settings['netmagic']): print("Invalid magic: " + inMagic.hex()) return inLenLE = inhdr[4:] su = struct.unpack(" Any: if hasattr(obj, "__dict__"): return obj.__dict__ elif hasattr(obj, "__slots__"): ret: Dict[str, Any] = {} for slot in obj.__slots__: val = getattr(obj, slot, None) if slot in HASH_INTS and isinstance(val, int): ret[slot] = ser_uint256(val).hex() - elif slot in HASH_INT_VECTORS and isinstance(val[0], int): + elif slot in HASH_INT_VECTORS and isinstance(val, list) and isinstance(val[0], int): ret[slot] = [ser_uint256(a).hex() for a in val] else: ret[slot] = to_jsonable(val) return ret elif isinstance(obj, list): return [to_jsonable(a) for a in obj] elif isinstance(obj, bytes): return obj.hex() else: return obj def process_file(path: str, messages: List[Any], recv: bool, progress_bar: Optional[ProgressBar]) -> None: with open(path, 'rb') as f_in: if progress_bar: bytes_read = 0 while True: if progress_bar: # Update progress bar diff = f_in.tell() - bytes_read - 1 progress_bar.update(diff) bytes_read = f_in.tell() - 1 # Read the Header tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) if not tmp_header_raw: break tmp_header = BytesIO(tmp_header_raw) time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") msgtype: bytes = tmp_header.read(MSGTYPE_SIZE).split(b'\x00', 1)[0] length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # Start converting the message to a dictionary - msg_dict = {} + msg_dict: Dict[str, Union[int, str]] = {} msg_dict["direction"] = "recv" if recv else "sent" msg_dict["time"] = time # "size" is less readable here, but more readable in the output msg_dict["size"] = length msg_ser = BytesIO(f_in.read(length)) # Determine message type if msgtype not in MESSAGEMAP: # Unrecognized message type try: msgtype_tmp = msgtype.decode() if not msgtype_tmp.isprintable(): - raise UnicodeDecodeError + raise MessageTypeNotPrintableError msg_dict["msgtype"] = msgtype_tmp - except UnicodeDecodeError: + except (UnicodeDecodeError, MessageTypeNotPrintableError): msg_dict["msgtype"] = "UNREADABLE" msg_dict["body"] = msg_ser.read().hex() msg_dict["error"] = "Unrecognized message type." messages.append(msg_dict) print( - f"WARNING - Unrecognized message type {msgtype} in {path}", + f"WARNING - Unrecognized message type {msgtype!r} in {path}", file=sys.stderr) continue # Deserialize the message msg = MESSAGEMAP[msgtype]() msg_dict["msgtype"] = msgtype.decode() try: msg.deserialize(msg_ser) except KeyboardInterrupt: raise except Exception: # Unable to deserialize message body msg_ser.seek(0, os.SEEK_SET) msg_dict["body"] = msg_ser.read().hex() msg_dict["error"] = "Unable to deserialize message." 
messages.append(msg_dict) print( f"WARNING - Unable to deserialize message in {path}", file=sys.stderr) continue # Convert body of message into a jsonable object if length: msg_dict["body"] = to_jsonable(msg) messages.append(msg_dict) if progress_bar: # Update the progress bar to the end of the current file # in case we exited the loop early # Go to end of file f_in.seek(0, os.SEEK_END) diff = f_in.tell() - bytes_read - 1 progress_bar.update(diff) def main(): parser = argparse.ArgumentParser( description=__doc__, epilog="EXAMPLE \n\t{0} -o out.json <data-dir>/message_capture/**/*.dat".format( sys.argv[0]), formatter_class=argparse.RawTextHelpFormatter) parser.add_argument( "capturepaths", nargs='+', help="binary message capture files to parse.") parser.add_argument( "-o", "--output", help="output file. If unset print to stdout") parser.add_argument( "-n", "--no-progress-bar", action='store_true', help="disable the progress bar. Automatically set if the output is not a terminal") args = parser.parse_args() capturepaths = [Path.cwd() / Path(capturepath) for capturepath in args.capturepaths] output = Path.cwd() / Path(args.output) if args.output else False use_progress_bar = (not args.no_progress_bar) and sys.stdout.isatty() messages: List[Any] = [] if use_progress_bar: total_size = sum(capture.stat().st_size for capture in capturepaths) progress_bar = ProgressBar(total_size) else: progress_bar = None for capture in capturepaths: process_file( str(capture), messages, "recv" in capture.stem, progress_bar) messages.sort(key=lambda msg: msg['time']) if use_progress_bar: progress_bar.set_progress(1) jsonrep = json.dumps(messages) if output: with open(str(output), 'w+', encoding="utf8") as f_out: f_out.write(jsonrep) else: print(jsonrep) if __name__ == "__main__": main() diff --git a/contrib/teamcity/build-configurations.py b/contrib/teamcity/build-configurations.py index abfec1bc8..cf625c51d 100755 --- a/contrib/teamcity/build-configurations.py +++ b/contrib/teamcity/build-configurations.py @@ -1,566 +1,566 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import argparse import asyncio import os import shutil import stat import subprocess import sys from pathlib import Path, PurePath from string import Template import yaml from deepmerge import always_merger -from teamcity import is_running_under_teamcity +from teamcity import is_running_under_teamcity # type: ignore from teamcity.messages import TeamcityServiceMessages # Default timeout value in seconds. Should be overridden by the # configuration file.
DEFAULT_TIMEOUT = 1 * 60 * 60 if sys.version_info < (3, 6): raise SystemError("This script requires python >= 3.6") class BuildConfiguration: def __init__(self, script_root, config_file, build_name=None): self.script_root = script_root self.config_file = config_file self.name = None self.config = {} self.cmake_flags = [] self.build_steps = [] self.build_directory = None self.junit_reports_dir = None self.test_logs_dir = None self.jobs = (os.cpu_count() or 0) + 1 self.project_root = PurePath( subprocess.run( ['git', 'rev-parse', '--show-toplevel'], capture_output=True, check=True, encoding='utf-8', text=True, ).stdout.strip() ) if not config_file.is_file(): raise FileNotFoundError( f"The configuration file does not exist {str(config_file)}" ) if build_name is not None: self.load(build_name) def load(self, build_name): self.name = build_name # Read the configuration with open(self.config_file, encoding="utf-8") as f: config = yaml.safe_load(f) # The configuration root should contain a mandatory element "builds", and # it should not be empty. if not config.get("builds", None): raise AssertionError( "Invalid configuration file {}: the \"builds\" element is missing or empty".format( str(self.config_file) ) ) # Check the target build has an entry in the configuration file build = config["builds"].get(self.name, None) if not build: raise AssertionError( "{} is not a valid build identifier. Valid identifiers are {}".format( self.name, list(config.keys()) ) ) # Get a list of the templates, if any templates = config.get("templates", {}) # If the build references some templates, merge all the configurations. # The merge is applied in the same order as the templates are declared # in the template list. template_config = {} template_names = build.get("templates", []) for template_name in template_names: # Raise an error if the template does not exist if template_name not in templates: raise AssertionError( "Build {} configuration inherits from template {}, but the template does not exist.".format( self.name, template_name ) ) always_merger.merge(template_config, templates.get(template_name)) self.config = always_merger.merge(template_config, build) # Create the build directory as needed self.build_directory = Path( self.project_root.joinpath( 'abc-ci-builds', self.name)) # Define the junit and logs directories self.junit_reports_dir = self.build_directory.joinpath("test/junit") self.test_logs_dir = self.build_directory.joinpath("test/log") self.functional_test_logs = self.build_directory.joinpath( "test/tmp/test_runner_*") # We will provide the required environment variables self.environment_variables = { "BUILD_DIR": str(self.build_directory), "CMAKE_PLATFORMS_DIR": self.project_root.joinpath("cmake", "platforms"), "THREADS": str(self.jobs), "TOPLEVEL": str(self.project_root), } def create_script_file(self, dest, content): # Write the content to a script file using a template with open(self.script_root.joinpath("bash_script.sh.in"), encoding='utf-8') as f: script_template_content = f.read() template = Template(script_template_content) with open(dest, 'w', encoding='utf-8') as f: f.write( template.safe_substitute( **self.environment_variables, SCRIPT_CONTENT=content, ) ) dest.chmod(dest.stat().st_mode | stat.S_IEXEC) def create_build_steps(self, artifact_dir): # There are 2 possibilities to define the build steps: # - By manually defining a script to run. # - By defining the configuration options and a list of target groups to # run. 
The configuration step should be run once, then all the target # groups. Each target group can contain 1 or more targets which # should be run in parallel. script = self.config.get("script", None) if script: script_file = self.build_directory.joinpath("script.sh") self.create_script_file(script_file, script) self.build_steps = [ { "bin": str(script_file), "args": [], } ] return # Get the cmake configuration definitions. self.cmake_flags = self.config.get("cmake_flags", []) self.cmake_flags.append(f"-DCMAKE_INSTALL_PREFIX={str(artifact_dir)}") # Get the targets to build. If none is provided then raise an error. targets = self.config.get("targets", None) if not targets: raise AssertionError( "No build target has been provided for build {} and no script is defined, aborting".format( self.name ) ) # Some more flags for the build_cmake.sh script if self.config.get("clang", False): self.cmake_flags.extend([ "-DCMAKE_C_COMPILER=clang", "-DCMAKE_CXX_COMPILER=clang++", ]) if self.config.get("gcc", False): self.cmake_flags.extend([ "-DCMAKE_C_COMPILER=gcc", "-DCMAKE_CXX_COMPILER=g++", ]) if self.config.get("junit", True): self.cmake_flags.extend([ "-DENABLE_JUNIT_REPORT=ON", ]) if self.config.get("Werror", False): self.cmake_flags.extend([ "-DCMAKE_C_FLAGS=-Werror", "-DCMAKE_CXX_FLAGS=-Werror", ]) # Get the generator, default to ninja generator = self.config.get("generator", {}) generator_name = generator.get("name", "Ninja") generator_command = generator.get("command", "ninja") # If the build runs on diff or has the fail_fast flag, exit on first error. # Otherwise keep running so we can gather more test results. fail_fast = self.config.get( "fail_fast", False) or self.config.get( "runOnDiff", False) generator_flags = generator.get( "flags", ["-k0"] if not fail_fast else []) # Max out the jobs by default when the generator uses make if generator_command == "make": generator_flags.append(f"-j{self.jobs}") # Handle cross build configuration cross_build = self.config.get("cross_build", None) if cross_build: static_depends = cross_build.get("static_depends", None) toolchain = cross_build.get("toolchain", None) emulator = cross_build.get("emulator", None) # Both static_depends and toolchain are mandatory for cross builds if not static_depends: raise AssertionError( "`static_depends` configuration is required for cross builds") if not toolchain: raise AssertionError( "`toolchain` configuration is required for cross builds") self.build_steps.append( { "bin": str(self.project_root.joinpath("contrib/devtools/build_depends.sh")), "args": [static_depends], } ) toolchain_file = self.project_root.joinpath( f"cmake/platforms/{toolchain}.cmake" ) self.cmake_flags.append( f"-DCMAKE_TOOLCHAIN_FILE={str(toolchain_file)}" ) if emulator: self.cmake_flags.append( f"-DCMAKE_CROSSCOMPILING_EMULATOR={shutil.which(emulator)}" ) # Configure using cmake.
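To make the assembled steps concrete: for a hypothetical build declaring clang: true, junit: true and a single target group, create_build_steps() would produce roughly the list below (paths and targets invented for illustration):

    # Illustrative contents of self.build_steps, assuming clang + Ninja
    # and one target group ["all", "check"]:
    build_steps = [
        {"bin": "cmake",
         "args": ["-G", "Ninja", "/work/bitcoin-abc",
                  "-DCMAKE_INSTALL_PREFIX=/work/artifacts",
                  "-DCMAKE_C_COMPILER=clang",
                  "-DCMAKE_CXX_COMPILER=clang++",
                  "-DENABLE_JUNIT_REPORT=ON"]},
        {"bin": "ninja", "args": ["-k0", "all", "check"]},
    ]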
self.build_steps.append( { "bin": "cmake", "args": ["-G", generator_name, str(self.project_root)] + self.cmake_flags, } ) for target_group in targets: self.build_steps.append( { "bin": generator_command, "args": generator_flags + target_group, } ) # If a post build script is defined, add it as a last step post_build = self.config.get("post_build", None) if post_build: script_file = self.build_directory.joinpath("post_build.sh") self.create_script_file(script_file, post_build) self.build_steps.append( { "bin": str(script_file), "args": [], } ) def get(self, key, default): return self.config.get(key, default) class UserBuild(): def __init__(self, configuration): self.configuration = configuration build_directory = self.configuration.build_directory self.artifact_dir = build_directory.joinpath("artifacts") # Build 2 log files: # - the full log will contain all unfiltered content # - the clean log will contain the same filtered content as what is # printed to stdout. This filter is done in print_line_to_logs(). self.logs = {} self.logs["clean_log"] = build_directory.joinpath( "build.clean.log") self.logs["full_log"] = build_directory.joinpath("build.full.log") # Clean the build directory before any build step is run. if self.configuration.build_directory.is_dir(): shutil.rmtree(self.configuration.build_directory) self.configuration.build_directory.mkdir(exist_ok=True, parents=True) def copy_artifacts(self, artifacts): # Make sure the artifact directory always exists. It is created before # the build is run (to let the build install things to it) but since we # have no control over what is being executed, it might very well be # deleted by the build as well. This can happen when the artifacts # are located in the build directory and the build calls git clean. self.artifact_dir.mkdir(exist_ok=True) # Find and copy artifacts. # The source is relative to the build tree, the destination relative to # the artifact directory. # The artifact directory is located in the build directory tree, so results # from it need to be excluded from the glob matches to prevent infinite # recursion. for pattern, dest in artifacts.items(): matches = [m for m in sorted(self.configuration.build_directory.glob( pattern)) if self.artifact_dir not in m.parents and self.artifact_dir != m] dest = self.artifact_dir.joinpath(dest) # Pattern did not match if not matches: continue # If there is a single file, destination is the new file path if len(matches) == 1 and matches[0].is_file(): # Create the parent directories as needed dest.parent.mkdir(parents=True, exist_ok=True) shutil.copy2(matches[0], dest) continue # If there are multiple files or a single directory, destination is a # directory. dest.mkdir(parents=True, exist_ok=True) for match in matches: if match.is_file(): shutil.copy2(match, dest) else: # FIXME after python >= 3.8 is enforced, avoid the # try/except block and use dirs_exist_ok=True instead.
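The replacement that FIXME refers to would collapse the try/except below into a single call; a sketch of the Python >= 3.8 form, using the same match and dest from the surrounding loop:

    # Python >= 3.8: merge into an existing directory instead of raising.
    shutil.copytree(match, dest.joinpath(match.name), dirs_exist_ok=True)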
try: shutil.copytree(match, dest.joinpath(match.name)) except FileExistsError: pass def print_line_to_logs(self, line): # Always print to the full log with open(self.logs["full_log"], 'a', encoding='utf-8') as log: log.write(line) # Discard the set -x bash output for stdout and the clean log if not line.startswith("+"): with open(self.logs["clean_log"], 'a', encoding='utf-8') as log: log.write(line) print(line.rstrip()) async def process_stdout(self, stdout): while True: try: line = await stdout.readline() line = line.decode('utf-8') if not line: break self.print_line_to_logs(line) except ValueError: self.print_line_to_logs( "--- Line discarded due to StreamReader overflow ---" ) continue def run_process(self, binary, args=None): args = args if args is not None else [] return asyncio.create_subprocess_exec( *([binary] + args), # Buffer limit is 64KB by default, but we need a larger buffer: limit=1024 * 256, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT, cwd=self.configuration.build_directory, env={ **os.environ, **self.configuration.environment_variables, **self.configuration.get("env", {}), "ARTIFACT_DIR": str(self.artifact_dir), "CMAKE_FLAGS": " ".join(self.configuration.cmake_flags), }, ) async def run_build(self, binary, args=None): args = args if args is not None else [] proc = await self.run_process(binary, args) logging_task = asyncio.ensure_future(self.process_stdout(proc.stdout)) # Block until the process is finished result = await proc.wait() # Wait up to a few seconds for logging to flush. Normally, this will # finish immediately. try: await asyncio.wait_for(logging_task, timeout=5) except asyncio.TimeoutError: self.print_line_to_logs( "Warning: Timed out while waiting for logging to flush. Some log lines may be missing.") return result async def wait_for_build(self, timeout, args=None): args = args if args is not None else [] message = f"Build {self.configuration.name} completed successfully" try: for step in self.configuration.build_steps: return_code = await asyncio.wait_for(self.run_build(step["bin"], step["args"]), timeout) if return_code != 0: message = "Build {} failed with exit code {}".format( self.configuration.name, return_code ) return except asyncio.TimeoutError: message = "Build {} timed out after {:.1f}s".format( self.configuration.name, round(timeout, 1) ) # The process is killed, set return code to 128 + 9 (SIGKILL) = 137 return_code = 137 finally: self.print_line_to_logs(message) build_directory = self.configuration.build_directory # Always add the build logs to the root of the artifacts artifacts = { **self.configuration.get("artifacts", {}), str(self.logs["full_log"].relative_to(build_directory)): "", str(self.logs["clean_log"].relative_to(build_directory)): "", str(self.configuration.junit_reports_dir.relative_to(build_directory)): "", str(self.configuration.test_logs_dir.relative_to(build_directory)): "", str(self.configuration.functional_test_logs.relative_to(build_directory)): "functional", } self.copy_artifacts(artifacts) return (return_code, message) def run(self, args=None): args = args if args is not None else [] if self.artifact_dir.is_dir(): shutil.rmtree(self.artifact_dir) self.artifact_dir.mkdir(exist_ok=True) self.configuration.create_build_steps(self.artifact_dir) return_code, message = asyncio.run( self.wait_for_build( self.configuration.get( "timeout", DEFAULT_TIMEOUT)) ) return (return_code, message) class TeamcityBuild(UserBuild): def __init__(self, configuration): super().__init__(configuration) # This accounts for the 
volume mapping from the container. # Our local /results is mapped to some relative ./results on the host, # so we use /results/artifacts to copy our files but results/artifacts as # an artifact path for teamcity. # TODO abstract out the volume mapping self.artifact_dir = Path("/results/artifacts") self.teamcity_messages = TeamcityServiceMessages() def copy_artifacts(self, artifacts): super().copy_artifacts(artifacts) # Start loading the junit reports. junit_reports_pattern = f"{str(self.artifact_dir.relative_to('/'))}/junit/*.xml" self.teamcity_messages.importData("junit", junit_reports_pattern) # Instruct teamcity to upload our artifact directory artifact_path_pattern = "+:{}=>artifacts.tar.gz".format( str(self.artifact_dir.relative_to("/")) ) self.teamcity_messages.publishArtifacts(artifact_path_pattern) def run(self, args=None): args = args if args is not None else [] # Let the user know what build is being run. # This makes it easier to retrieve the info from the logs. self.teamcity_messages.customMessage( f"Starting build {self.configuration.name}", status="NORMAL" ) return_code, message = super().run() # Since we are aborting the build, make sure to flush everything first os.sync() if return_code != 0: # Add a build problem to the report self.teamcity_messages.buildProblem( message, # Let Teamcity calculate an ID from our message None ) # Change the final build message self.teamcity_messages.buildStatus( # Don't change the status, let Teamcity set it to failure None, message ) else: # Change the final build message but keep the original one as well self.teamcity_messages.buildStatus( # Don't change the status, let Teamcity set it to success None, f"{message} ({{build.status.text}})" ) return (return_code, message) def main(): script_dir = PurePath(os.path.realpath(__file__)).parent # By default search for a configuration file in the same directory as this # script. default_config_path = Path( script_dir.joinpath("build-configurations.yml") ) parser = argparse.ArgumentParser(description="Run a CI build") parser.add_argument( "build", help="The name of the build to run" ) parser.add_argument( "--config", "-c", help="Path to the builds configuration file (default to {})".format( str(default_config_path) ) ) args, unknown_args = parser.parse_known_args() # Check the configuration file exists config_path = Path(args.config) if args.config else default_config_path build_configuration = BuildConfiguration( script_dir, config_path, args.build) if is_running_under_teamcity(): build = TeamcityBuild(build_configuration) else: build = UserBuild(build_configuration) sys.exit(build.run(unknown_args)[0]) if __name__ == '__main__': main() diff --git a/contrib/tracing/p2p_monitor.py b/contrib/tracing/p2p_monitor.py index f65b68c9d..9732d73c4 100755 --- a/contrib/tracing/p2p_monitor.py +++ b/contrib/tracing/p2p_monitor.py @@ -1,258 +1,259 @@ #!/usr/bin/env python3 """ Interactive bitcoind P2P network traffic monitor utilizing USDT and the net:inbound_message and net:outbound_message tracepoints. """ # This script demonstrates what USDT for Bitcoin ABC can enable. It uses BCC # (https://github.com/iovisor/bcc) to load a sandboxed eBPF program into the # Linux kernel (root privileges are required). The eBPF program attaches to two # statically defined tracepoints. The tracepoint 'net:inbound_message' is called # when a new P2P message is received, and 'net:outbound_message' is called on # outbound P2P messages. The eBPF program submits the P2P messages to # this script via a BPF ring buffer. 
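The core BCC pattern the script builds on reduces to a few calls: attach each USDT probe to a handler in the C program, load it, register perf-buffer callbacks, and poll. A minimal hedged sketch (the binary path is illustrative; assumes root privileges, bcc installed, a bitcoind built with tracepoints, and program holding the eBPF source defined below):

    from bcc import BPF, USDT

    usdt = USDT(path="./src/bitcoind")  # illustrative path to the traced binary
    usdt.enable_probe(probe="inbound_message", fn_name="trace_inbound_message")
    bpf = BPF(text=program, usdt_contexts=[usdt])

    def on_event(cpu, data, size):
        # Decode one submitted p2p_message struct from the perf buffer.
        print(bpf["inbound_messages"].event(data).msg_size)

    bpf["inbound_messages"].open_perf_buffer(on_event)
    while True:
        bpf.perf_buffer_poll(timeout=50)  # dispatch pending events to on_event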
import curses import sys from curses import panel, wrapper +from typing import List from bcc import BPF, USDT # BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into # a sandboxed Linux kernel VM. program = """ #include <uapi/linux/ptrace.h> // Tor v3 addresses are 62 chars + 6 chars for the port (':12345'). // I2P addresses are 60 chars + 6 chars for the port (':12345'). #define MAX_PEER_ADDR_LENGTH 62 + 6 #define MAX_PEER_CONN_TYPE_LENGTH 20 #define MAX_MSG_TYPE_LENGTH 20 struct p2p_message { u64 peer_id; char peer_addr[MAX_PEER_ADDR_LENGTH]; char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH]; char msg_type[MAX_MSG_TYPE_LENGTH]; u64 msg_size; }; // Two BPF perf buffers for pushing data (here P2P messages) to user space. BPF_PERF_OUTPUT(inbound_messages); BPF_PERF_OUTPUT(outbound_messages); int trace_inbound_message(struct pt_regs *ctx) { struct p2p_message msg = {}; bpf_usdt_readarg(1, ctx, &msg.peer_id); bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); bpf_usdt_readarg(5, ctx, &msg.msg_size); inbound_messages.perf_submit(ctx, &msg, sizeof(msg)); return 0; }; int trace_outbound_message(struct pt_regs *ctx) { struct p2p_message msg = {}; bpf_usdt_readarg(1, ctx, &msg.peer_id); bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); bpf_usdt_readarg(5, ctx, &msg.msg_size); outbound_messages.perf_submit(ctx, &msg, sizeof(msg)); return 0; }; """ class Message: """ A P2P network message. """ msg_type = "" size = 0 data = bytes() inbound = False def __init__(self, msg_type, size, inbound): self.msg_type = msg_type self.size = size self.inbound = inbound class Peer: """ A P2P network peer. """ id = 0 address = "" connection_type = "" - last_messages = list() + last_messages: List[Message] = [] total_inbound_msgs = 0 total_inbound_bytes = 0 total_outbound_msgs = 0 total_outbound_bytes = 0 def __init__(self, id, address, connection_type): self.id = id self.address = address self.connection_type = connection_type - self.last_messages = list() + self.last_messages = [] def add_message(self, message): self.last_messages.append(message) if len(self.last_messages) > 25: self.last_messages.pop(0) if message.inbound: self.total_inbound_bytes += message.size self.total_inbound_msgs += 1 else: self.total_outbound_bytes += message.size self.total_outbound_msgs += 1 def main(bitcoind_path): peers = {} bitcoind_with_usdts = USDT(path=str(bitcoind_path)) # attaching the trace functions defined in the BPF program to the # tracepoints bitcoind_with_usdts.enable_probe( probe="inbound_message", fn_name="trace_inbound_message") bitcoind_with_usdts.enable_probe( probe="outbound_message", fn_name="trace_outbound_message") bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) # BCC: perf buffer handle function for inbound_messages def handle_inbound(_, data, size): """ Inbound message handler.
Called each time a message is submitted to the inbound_messages BPF table.""" event = bpf["inbound_messages"].event(data) if event.peer_id not in peers: peer = Peer(event.peer_id, event.peer_addr.decode( "utf-8"), event.peer_conn_type.decode("utf-8")) peers[peer.id] = peer peers[event.peer_id].add_message( Message(event.msg_type.decode("utf-8"), event.msg_size, True)) # BCC: perf buffer handle function for outbound_messages def handle_outbound(_, data, size): """ Outbound message handler. Called each time a message is submitted to the outbound_messages BPF table.""" event = bpf["outbound_messages"].event(data) if event.peer_id not in peers: peer = Peer(event.peer_id, event.peer_addr.decode( "utf-8"), event.peer_conn_type.decode("utf-8")) peers[peer.id] = peer peers[event.peer_id].add_message( Message(event.msg_type.decode("utf-8"), event.msg_size, False)) # BCC: add handlers to the inbound and outbound perf buffers bpf["inbound_messages"].open_perf_buffer(handle_inbound) bpf["outbound_messages"].open_perf_buffer(handle_outbound) wrapper(loop, bpf, peers) def loop(screen, bpf, peers): screen.nodelay(1) cur_list_pos = 0 win = curses.newwin(30, 70, 2, 7) win.erase() win.border(ord("|"), ord("|"), ord("-"), ord("-"), ord("-"), ord("-"), ord("-"), ord("-")) info_panel = panel.new_panel(win) info_panel.hide() ROWS_AVALIABLE_FOR_LIST = curses.LINES - 5 scroll = 0 while True: try: # BCC: poll the perf buffers for new events or timeout after 50ms bpf.perf_buffer_poll(timeout=50) ch = screen.getch() if (ch == curses.KEY_DOWN or ch == ord("j")) and cur_list_pos < len( peers.keys()) - 1 and info_panel.hidden(): cur_list_pos += 1 if cur_list_pos >= ROWS_AVALIABLE_FOR_LIST: scroll += 1 if ((ch == curses.KEY_UP or ch == ord("k")) and cur_list_pos > 0 and info_panel.hidden()): cur_list_pos -= 1 if scroll > 0: scroll -= 1 if ch == ord('\n') or ch == ord(' '): if info_panel.hidden(): info_panel.show() else: info_panel.hide() screen.erase() render( screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel) curses.panel.update_panels() screen.refresh() except KeyboardInterrupt: exit() def render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel): """ renders the list of peers and details panel This code is unrelated to USDT, BCC and BPF. 
""" header_format = "%6s %-20s %-20s %-22s %-67s" row_format = "%6s %-5d %9d byte %-5d %9d byte %-22s %-67s" screen.addstr(0, 1, (" P2P Message Monitor "), curses.A_REVERSE) screen.addstr( 1, 0, (" Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages"), curses.A_NORMAL) screen.addstr(3, 0, header_format % ("PEER", "OUTBOUND", "INBOUND", "TYPE", "ADDR"), curses.A_BOLD | curses.A_UNDERLINE) peer_list = sorted(peers.keys())[scroll:ROWS_AVALIABLE_FOR_LIST + scroll] for i, peer_id in enumerate(peer_list): peer = peers[peer_id] screen.addstr(i + 4, 0, row_format % (peer.id, peer.total_outbound_msgs, peer.total_outbound_bytes, peer.total_inbound_msgs, peer.total_inbound_bytes, peer.connection_type, peer.address), curses.A_REVERSE if i + scroll == cur_list_pos else curses.A_NORMAL) if i + scroll == cur_list_pos: info_window = info_panel.window() info_window.erase() info_window.border( ord("|"), ord("|"), ord("-"), ord("-"), ord("-"), ord("-"), ord("-"), ord("-")) info_window.addstr( 1, 1, f"PEER {peer.id} ({peer.address})".center(68), curses.A_REVERSE | curses.A_BOLD) info_window.addstr( 2, 1, f" OUR NODE{peer.connection_type:^54}PEER ", curses.A_BOLD) for i, msg in enumerate(peer.last_messages): if msg.inbound: info_window.addstr( i + 3, 1, f"{f'<--- {msg.msg_type} ({msg.size} bytes) ':68s}", curses.A_NORMAL) else: info_window.addstr( i + 3, 1, f" {msg.msg_type} ({msg.size} byte) --->", curses.A_NORMAL) if __name__ == "__main__": if len(sys.argv) < 2: print("USAGE:", sys.argv[0], "path/to/bitcoind") exit() path = sys.argv[1] main(path)