diff --git a/.arclint b/.arclint index 5d43f06fa..f9df469c8 100644 --- a/.arclint +++ b/.arclint @@ -1,343 +1,340 @@ { "linters": { "generated": { "type": "generated" }, "clang-format": { "type": "clang-format", "version": ">=12.0", "bin": [ "clang-format-12", "clang-format" ], "include": "(^(src|chronik)/.*\\.(h|c|cpp|mm)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "autopep8": { "type": "autopep8", "version": ">=1.3.4", - "include": "(^contrib/.*\\.py$)", - "exclude": [ - "(^contrib/gitian-builder/)", - "(^contrib/apple-sdk-tools/)" - ], + "include": "(^contrib/(buildbot|devtools)/.*\\.py$)", "flags": [ "--aggressive", "--ignore=W503,W504", "--max-line-length=88" ] }, "black": { "type": "black", "version": ">=23.0.0", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", - "(^contrib/)" + "(^contrib/devtools/)", + "(^contrib/buildbot/)" ], "flags": [ "--preview" ] }, "flake8": { "type": "flake8", "version": ">=5.0", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ], "flags": [ "--ignore=A003,E203,E303,E305,E501,E704,W503,W504", "--require-plugins=flake8-comprehensions,flake8-builtins" ] }, "lint-format-strings": { "type": "lint-format-strings", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/test/fuzz/strprintf.cpp$)" ] }, "check-doc": { "type": "check-doc", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)" }, "lint-tests": { "type": "lint-tests", "include": "(^src/(seeder/|rpc/|wallet/)?test/.*\\.(cpp)$)" }, "phpcs": { "type": "phpcs", "include": "(\\.php$)", "exclude": [ "(^arcanist/__phutil_library_.+\\.php$)" ], "phpcs.standard": "arcanist/phpcs.xml" }, "lint-locale-dependence": { "type": "lint-locale-dependence", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes/|leveldb/|secp256k1/|tinyformat.h|univalue/))", "(^src/bench/nanobench.h$)" ] }, "lint-cheader": { "type": "lint-cheader", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "spelling": { "type": "spelling", "exclude": [ "(^build-aux/m4/)", "(^depends/)", "(^doc/release-notes/)", "(^contrib/gitian-builder/)", "(^src/(qt/locale|secp256k1|univalue|leveldb)/)", "(^test/lint/dictionary/)", "(package-lock.json)" ], "spelling.dictionaries": [ "test/lint/dictionary/english.json" ] }, "lint-assert-with-side-effects": { "type": "lint-assert-with-side-effects", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-quotes": { "type": "lint-include-quotes", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-include-guard": { "type": "lint-include-guard", "include": "(^(src|chronik)/.*\\.h$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/tinyformat.h$)" ] }, "lint-include-source": { "type": "lint-include-source", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-std-chrono": { "type": "lint-std-chrono", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-stdint": { "type": "lint-stdint", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ 
"(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/assumptions.h$)" ] }, "lint-source-filename": { "type": "lint-source-filename", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-boost-dependencies": { "type": "lint-boost-dependencies", "include": "(^(src|chronik)/.*\\.(h|cpp)$)" }, "lint-python-encoding": { "type": "lint-python-encoding", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-python-shebang": { "type": "lint-python-shebang", "include": "(\\.py$)", "exclude": [ "(__init__\\.py$)", "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "lint-bash-shebang": { "type": "lint-bash-shebang", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)" ] }, "shellcheck": { "type": "shellcheck", "version": ">=0.7.0", "flags": [ "--external-sources", "--source-path=SCRIPTDIR" ], "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)" ] }, "lint-shell-locale": { "type": "lint-shell-locale", "include": "(\\.sh$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue)/)", "(^cmake/utils/log-and-print-on-failure.sh)" ] }, "lint-cpp-void-parameters": { "type": "lint-cpp-void-parameters", "include": "(^(src|chronik)/.*\\.(h|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)", "(^src/compat/glibc_compat.cpp$)" ] }, "lint-logs": { "type": "lint-logs", "include": "(^(src|chronik)/.*\\.(h|cpp|rs)$)" }, "lint-qt": { "type": "lint-qt", "include": "(^src/qt/.*\\.(h|cpp)$)", "exclude": [ "(^src/qt/(locale|forms|res)/)" ] }, "lint-doxygen": { "type": "lint-doxygen", "include": "(^(src|chronik)/.*\\.(h|c|cpp)$)", "exclude": [ "(^src/(crypto/ctaes|secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "lint-whitespace": { "type": "lint-whitespace", "include": "(\\.(ac|am|cmake|conf|in|include|json|m4|md|openrc|php|pl|rs|sh|txt|yml)$)", "exclude": [ "(^contrib/gitian-builder/)", "(^src/(secp256k1|univalue|leveldb)/)", "(^src/bench/nanobench.h$)" ] }, "yamllint": { "type": "yamllint", "include": "(\\.(yml|yaml)$)", "exclude": "(^src/(secp256k1|univalue|leveldb)/)" }, "lint-check-nonfatal": { "type": "lint-check-nonfatal", "include": [ "(^src/rpc/.*\\.(h|c|cpp)$)", "(^src/wallet/rpc*.*\\.(h|c|cpp)$)" ], "exclude": "(^src/rpc/server.cpp)" }, "lint-markdown": { "type": "lint-markdown", "include": [ "(\\.md$)" ], "exclude": "(^contrib/gitian-builder/)" }, "lint-python-mypy": { "type": "lint-python-mypy", "version": ">=0.910", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", "(^contrib/macdeploy/)" ], "flags": [ "--ignore-missing-imports", "--install-types", "--non-interactive" ] }, "lint-python-mutable-default": { "type": "lint-python-mutable-default", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "prettier": { "type": "prettier", "version": ">=2.6.0", "include": [ "(^cashtab/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)", "(^web/.*\\.(css|html|js|json|jsx|md|scss|ts|tsx)$)" ], "exclude": "(^web/.*/translations/.*\\.json$)" }, "lint-python-isort": { "type": "lint-python-isort", "version": ">=5.6.4", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] }, "rustfmt": { "type": "rustfmt", "version": ">=1.5.1", "include": "(\\.rs$)" }, "eslint": { "type": 
"eslint", "version": ">=8.0.0", "include": [ "(cashtab/.*\\.js$)", "(apps/alias-server/.*\\.js$)", "(modules/ecashaddrjs/.*\\.js$)", "(apps/ecash-herald/.*\\.js$)", "(modules/chronik-client/.*\\.(js|jsx|ts|tsx)$)" ] }, "lint-python-flynt": { "type": "lint-python-flynt", "version": ">=0.78", "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)" ] } } } diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py index e3c3b7d6c..4327e769b 100755 --- a/contrib/gitian-build.py +++ b/contrib/gitian-build.py @@ -1,315 +1,616 @@ #!/usr/bin/env python3 # Copyright (c) 2019-2020 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import argparse import multiprocessing import os import subprocess import sys def setup(): global args, workdir - programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget'] + programs = ["ruby", "git", "apt-cacher-ng", "make", "wget"] if args.kvm: - programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils'] + programs += ["python-vm-builder", "qemu-kvm", "qemu-utils"] elif args.docker: - dockers = ['docker.io', 'docker-ce'] + dockers = ["docker.io", "docker-ce"] for i in dockers: - return_code = subprocess.call( - ['sudo', 'apt-get', 'install', '-qq', i]) + return_code = subprocess.call(["sudo", "apt-get", "install", "-qq", i]) if return_code == 0: break if return_code != 0: - print('Cannot find any way to install docker', file=sys.stderr) + print("Cannot find any way to install docker", file=sys.stderr) exit(1) else: - programs += ['lxc', 'debootstrap'] - subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs) - if not os.path.isdir('gitian-builder'): + programs += ["lxc", "debootstrap"] + subprocess.check_call(["sudo", "apt-get", "install", "-qq"] + programs) + if not os.path.isdir("gitian-builder"): subprocess.check_call( - ['git', 'clone', 'https://github.com/devrandom/gitian-builder.git']) - if not os.path.isdir('bitcoin-abc'): + ["git", "clone", "https://github.com/devrandom/gitian-builder.git"] + ) + if not os.path.isdir("bitcoin-abc"): subprocess.check_call( - ['git', 'clone', 'https://github.com/Bitcoin-ABC/bitcoin-abc.git']) - os.chdir('gitian-builder') - make_image_prog = ['bin/make-base-vm', - '--distro', 'debian', '--suite', 'buster', '--arch', 'amd64'] + ["git", "clone", "https://github.com/Bitcoin-ABC/bitcoin-abc.git"] + ) + os.chdir("gitian-builder") + make_image_prog = [ + "bin/make-base-vm", + "--distro", + "debian", + "--suite", + "buster", + "--arch", + "amd64", + ] if args.docker: - make_image_prog += ['--docker'] + make_image_prog += ["--docker"] elif not args.kvm: - make_image_prog += ['--lxc'] + make_image_prog += ["--lxc"] subprocess.check_call(make_image_prog) os.chdir(workdir) if args.is_bionic and not args.kvm and not args.docker: subprocess.check_call( - ['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) - print('Reboot is required') + ["sudo", "sed", "-i", "s/lxcbr0/br0/", "/etc/default/lxc-net"] + ) + print("Reboot is required") exit(0) def build(): global args, workdir - base_output_dir = 'bitcoin-binaries/' + args.version - os.makedirs(base_output_dir + '/src', exist_ok=True) - print('\nBuilding Dependencies\n') - os.chdir('gitian-builder') - os.makedirs('inputs', exist_ok=True) - - subprocess.check_call(['make', '-C', '../bitcoin-abc/depends', - 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common']) - - output_dir_src = '../' + base_output_dir + '/src' 
+ base_output_dir = "bitcoin-binaries/" + args.version + os.makedirs(base_output_dir + "/src", exist_ok=True) + print("\nBuilding Dependencies\n") + os.chdir("gitian-builder") + os.makedirs("inputs", exist_ok=True) + + subprocess.check_call( + [ + "make", + "-C", + "../bitcoin-abc/depends", + "download", + "SOURCES_PATH=" + os.getcwd() + "/cache/common", + ] + ) + + output_dir_src = "../" + base_output_dir + "/src" if args.linux: - print('\nCompiling ' + args.version + ' Linux') - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin=' + args.commit, - '--url', 'bitcoin=' + args.url, '../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + - '-linux', '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml']) - output_dir_linux = '../' + base_output_dir + '/linux' + print("\nCompiling " + args.version + " Linux") + subprocess.check_call( + [ + "bin/gbuild", + "-j", + args.jobs, + "-m", + args.memory, + "--commit", + "bitcoin=" + args.commit, + "--url", + "bitcoin=" + args.url, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-linux", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml", + ] + ) + output_dir_linux = "../" + base_output_dir + "/linux" os.makedirs(output_dir_linux, exist_ok=True) subprocess.check_call( - 'mv build/out/bitcoin-*.tar.gz ' + output_dir_linux, shell=True) + "mv build/out/bitcoin-*.tar.gz " + output_dir_linux, shell=True + ) subprocess.check_call( - 'mv build/out/src/bitcoin-*.tar.gz ' + output_dir_src, shell=True) + "mv build/out/src/bitcoin-*.tar.gz " + output_dir_src, shell=True + ) subprocess.check_call( - 'mv result/bitcoin-*-linux-res.yml ' + output_dir_linux, shell=True) + "mv result/bitcoin-*-linux-res.yml " + output_dir_linux, shell=True + ) if args.windows: - print('\nCompiling ' + args.version + ' Windows') - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin=' + args.commit, - '--url', 'bitcoin=' + args.url, '../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + - '-win-unsigned', '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml']) - output_dir_win = '../' + base_output_dir + '/win' + print("\nCompiling " + args.version + " Windows") + subprocess.check_call( + [ + "bin/gbuild", + "-j", + args.jobs, + "-m", + args.memory, + "--commit", + "bitcoin=" + args.commit, + "--url", + "bitcoin=" + args.url, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-win-unsigned", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml", + ] + ) + output_dir_win = "../" + base_output_dir + "/win" os.makedirs(output_dir_win, exist_ok=True) subprocess.check_call( - 'mv build/out/bitcoin-*-win-unsigned.tar.gz inputs/', shell=True) + "mv build/out/bitcoin-*-win-unsigned.tar.gz inputs/", shell=True + ) subprocess.check_call( - 'mv build/out/bitcoin-*.zip build/out/bitcoin-*.exe ' + 
output_dir_win, shell=True) + "mv build/out/bitcoin-*.zip build/out/bitcoin-*.exe " + output_dir_win, + shell=True, + ) subprocess.check_call( - 'mv build/out/src/bitcoin-*.tar.gz ' + output_dir_src, shell=True) + "mv build/out/src/bitcoin-*.tar.gz " + output_dir_src, shell=True + ) subprocess.check_call( - 'mv result/bitcoin-*-win-res.yml ' + output_dir_win, shell=True) + "mv result/bitcoin-*-win-res.yml " + output_dir_win, shell=True + ) if args.macos: - print('\nCompiling ' + args.version + ' MacOS') - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin=' + args.commit, - '--url', 'bitcoin=' + args.url, '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + - '-osx-unsigned', '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml']) - output_dir_osx = '../' + base_output_dir + '/osx' + print("\nCompiling " + args.version + " MacOS") + subprocess.check_call( + [ + "bin/gbuild", + "-j", + args.jobs, + "-m", + args.memory, + "--commit", + "bitcoin=" + args.commit, + "--url", + "bitcoin=" + args.url, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-osx-unsigned", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml", + ] + ) + output_dir_osx = "../" + base_output_dir + "/osx" os.makedirs(output_dir_osx, exist_ok=True) subprocess.check_call( - 'mv build/out/bitcoin-*-osx-unsigned.tar.gz inputs/', shell=True) + "mv build/out/bitcoin-*-osx-unsigned.tar.gz inputs/", shell=True + ) subprocess.check_call( - 'mv build/out/bitcoin-*.tar.gz build/out/bitcoin-*.dmg ' + output_dir_osx, shell=True) + "mv build/out/bitcoin-*.tar.gz build/out/bitcoin-*.dmg " + output_dir_osx, + shell=True, + ) subprocess.check_call( - 'mv build/out/src/bitcoin-*.tar.gz ' + output_dir_src, shell=True) + "mv build/out/src/bitcoin-*.tar.gz " + output_dir_src, shell=True + ) subprocess.check_call( - 'mv result/bitcoin-*-osx-res.yml ' + output_dir_osx, shell=True) + "mv result/bitcoin-*-osx-res.yml " + output_dir_osx, shell=True + ) os.chdir(workdir) if args.commit_files: - print('\nCommitting ' + args.version + ' Unsigned Sigs\n') - os.chdir('gitian.sigs') + print("\nCommitting " + args.version + " Unsigned Sigs\n") + os.chdir("gitian.sigs") + subprocess.check_call(["git", "add", args.version + "-linux/" + args.signer]) subprocess.check_call( - ['git', 'add', args.version + '-linux/' + args.signer]) + ["git", "add", args.version + "-win-unsigned/" + args.signer] + ) subprocess.check_call( - ['git', 'add', args.version + '-win-unsigned/' + args.signer]) + ["git", "add", args.version + "-osx-unsigned/" + args.signer] + ) subprocess.check_call( - ['git', 'add', args.version + '-osx-unsigned/' + args.signer]) - subprocess.check_call( - ['git', 'commit', '-m', 'Add ' + args.version + ' unsigned sigs for ' + args.signer]) + [ + "git", + "commit", + "-m", + "Add " + args.version + " unsigned sigs for " + args.signer, + ] + ) os.chdir(workdir) def sign(): global args, workdir - os.chdir('gitian-builder') + os.chdir("gitian-builder") if args.windows: - print('\nSigning ' + args.version + ' Windows') - subprocess.check_call('cp inputs/bitcoin-' + args.version + - '-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz', shell=True) - 
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature=' + args.commit, - '../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + '-win-signed', - '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml']) + print("\nSigning " + args.version + " Windows") + subprocess.check_call( + "cp inputs/bitcoin-" + + args.version + + "-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz", + shell=True, + ) + subprocess.check_call( + [ + "bin/gbuild", + "-i", + "--commit", + "signature=" + args.commit, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-win-signed", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml", + ] + ) subprocess.check_call( - 'mv build/out/bitcoin-*win64-setup.exe ../bitcoin-binaries/' + args.version, shell=True) + "mv build/out/bitcoin-*win64-setup.exe ../bitcoin-binaries/" + args.version, + shell=True, + ) if args.macos: - print('\nSigning ' + args.version + ' MacOS') - subprocess.check_call('cp inputs/bitcoin-' + args.version + - '-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz', shell=True) - subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature=' + args.commit, - '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + '-osx-signed', - '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml']) - subprocess.check_call('mv build/out/bitcoin-osx-signed.dmg ../bitcoin-binaries/' + - args.version + '/bitcoin-' + args.version + '-osx.dmg', shell=True) + print("\nSigning " + args.version + " MacOS") + subprocess.check_call( + "cp inputs/bitcoin-" + + args.version + + "-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz", + shell=True, + ) + subprocess.check_call( + [ + "bin/gbuild", + "-i", + "--commit", + "signature=" + args.commit, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-osx-signed", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml", + ] + ) + subprocess.check_call( + "mv build/out/bitcoin-osx-signed.dmg ../bitcoin-binaries/" + + args.version + + "/bitcoin-" + + args.version + + "-osx.dmg", + shell=True, + ) os.chdir(workdir) if args.commit_files: - print('\nCommitting ' + args.version + ' Signed Sigs\n') - os.chdir('gitian.sigs') + print("\nCommitting " + args.version + " Signed Sigs\n") + os.chdir("gitian.sigs") subprocess.check_call( - ['git', 'add', args.version + '-win-signed/' + args.signer]) + ["git", "add", args.version + "-win-signed/" + args.signer] + ) subprocess.check_call( - ['git', 'add', args.version + '-osx-signed/' + args.signer]) - subprocess.check_call(['git', 'commit', '-a', '-m', 'Add ' + - args.version + ' signed binary sigs for ' + args.signer]) + ["git", "add", args.version + "-osx-signed/" + args.signer] + ) + subprocess.check_call( + [ + "git", + "commit", + "-a", + "-m", + "Add " + args.version + " signed binary sigs for " + args.signer, + ] + ) 
os.chdir(workdir) def verify(): global args, workdir - os.chdir('gitian-builder') - - print('\nVerifying v' + args.version + ' Linux\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-linux', '../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml']) - print('\nVerifying v' + args.version + ' Windows\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-win-unsigned', '../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml']) - print('\nVerifying v' + args.version + ' MacOS\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-osx-unsigned', '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml']) - print('\nVerifying v' + args.version + ' Signed Windows\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-win-signed', '../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml']) - print('\nVerifying v' + args.version + ' Signed MacOS\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-osx-signed', '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml']) + os.chdir("gitian-builder") + + print("\nVerifying v" + args.version + " Linux\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-linux", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml", + ] + ) + print("\nVerifying v" + args.version + " Windows\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-win-unsigned", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml", + ] + ) + print("\nVerifying v" + args.version + " MacOS\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-osx-unsigned", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml", + ] + ) + print("\nVerifying v" + args.version + " Signed Windows\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-win-signed", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml", + ] + ) + print("\nVerifying v" + args.version + " Signed MacOS\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-osx-signed", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml", + ] + ) os.chdir(workdir) def main(): global args, workdir num_cpus = multiprocessing.cpu_count() - parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version') - parser.add_argument('-c', '--commit', action='store_true', dest='commit', - help='Indicate that the version argument is for a commit or branch') - parser.add_argument('-p', '--pull', action='store_true', dest='pull', - help='Indicate that the version argument is the number of a github repository pull request') - parser.add_argument('-u', '--url', dest='url', default='https://github.com/Bitcoin-ABC/bitcoin-abc.git', - help='Specify the URL of the repository. 
Default is %(default)s') - parser.add_argument('-v', '--verify', action='store_true', - dest='verify', help='Verify the Gitian build') - parser.add_argument('-b', '--build', action='store_true', - dest='build', help='Do a Gitian build') - parser.add_argument('-s', '--sign', action='store_true', dest='sign', - help='Make signed binaries for Windows and MacOS') - parser.add_argument('-B', '--buildsign', action='store_true', - dest='buildsign', help='Build both signed and unsigned binaries') - parser.add_argument('-o', '--os', dest='os', default='lwm', - help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS') - parser.add_argument('-j', '--jobs', dest='jobs', default=str(num_cpus), - help='Number of processes to use. Default %(default)s') - parser.add_argument('-m', '--memory', dest='memory', default='3500', - help='Memory to allocate in MiB. Default %(default)s') - parser.add_argument('-k', '--kvm', action='store_true', - dest='kvm', help='Use KVM instead of LXC') - parser.add_argument('-d', '--docker', action='store_true', - dest='docker', help='Use Docker instead of LXC') - parser.add_argument('-S', '--setup', action='store_true', dest='setup', - help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)') - parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', - help='Create the assert file for detached signing. Will not commit anything.') - parser.add_argument('-n', '--no-commit', action='store_false', - dest='commit_files', help='Do not commit anything to git') + parser = argparse.ArgumentParser(usage="%(prog)s [options] signer version") + parser.add_argument( + "-c", + "--commit", + action="store_true", + dest="commit", + help="Indicate that the version argument is for a commit or branch", + ) + parser.add_argument( + "-p", + "--pull", + action="store_true", + dest="pull", + help=( + "Indicate that the version argument is the number of a github repository" + " pull request" + ), + ) + parser.add_argument( + "-u", + "--url", + dest="url", + default="https://github.com/Bitcoin-ABC/bitcoin-abc.git", + help="Specify the URL of the repository. Default is %(default)s", + ) + parser.add_argument( + "-v", + "--verify", + action="store_true", + dest="verify", + help="Verify the Gitian build", + ) + parser.add_argument( + "-b", "--build", action="store_true", dest="build", help="Do a Gitian build" + ) + parser.add_argument( + "-s", + "--sign", + action="store_true", + dest="sign", + help="Make signed binaries for Windows and MacOS", + ) + parser.add_argument( + "-B", + "--buildsign", + action="store_true", + dest="buildsign", + help="Build both signed and unsigned binaries", + ) + parser.add_argument( + "-o", + "--os", + dest="os", + default="lwm", + help=( + "Specify which Operating Systems the build is for. Default is %(default)s." + " l for Linux, w for Windows, m for MacOS" + ), + ) + parser.add_argument( + "-j", + "--jobs", + dest="jobs", + default=str(num_cpus), + help="Number of processes to use. Default %(default)s", + ) + parser.add_argument( + "-m", + "--memory", + dest="memory", + default="3500", + help="Memory to allocate in MiB. 
Default %(default)s", + ) + parser.add_argument( + "-k", "--kvm", action="store_true", dest="kvm", help="Use KVM instead of LXC" + ) + parser.add_argument( + "-d", + "--docker", + action="store_true", + dest="docker", + help="Use Docker instead of LXC", + ) + parser.add_argument( + "-S", + "--setup", + action="store_true", + dest="setup", + help=( + "Set up the Gitian building environment. Uses LXC. If you want to use KVM," + " use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)" + ), + ) + parser.add_argument( + "-D", + "--detach-sign", + action="store_true", + dest="detach_sign", + help="Create the assert file for detached signing. Will not commit anything.", + ) parser.add_argument( - 'signer', help='GPG signer to sign each build assert file') + "-n", + "--no-commit", + action="store_false", + dest="commit_files", + help="Do not commit anything to git", + ) + parser.add_argument("signer", help="GPG signer to sign each build assert file") parser.add_argument( - 'version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified') + "version", + help=( + "Version number, commit, or branch to build. If building a commit or" + " branch, the -c option must be specified" + ), + ) args = parser.parse_args() workdir = os.getcwd() - args.linux = 'l' in args.os - args.windows = 'w' in args.os - args.macos = 'm' in args.os + args.linux = "l" in args.os + args.windows = "w" in args.os + args.macos = "m" in args.os - args.is_bionic = b'bionic' in subprocess.check_output( - ['lsb_release', '-cs']) + args.is_bionic = b"bionic" in subprocess.check_output(["lsb_release", "-cs"]) if args.buildsign: args.build = True args.sign = True if args.kvm and args.docker: - raise Exception('Error: cannot have both kvm and docker') + raise Exception("Error: cannot have both kvm and docker") - args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign' + args.sign_prog = "true" if args.detach_sign else "gpg --detach-sign" # Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know # that we use lxc or docker if args.docker: - os.environ['USE_DOCKER'] = '1' + os.environ["USE_DOCKER"] = "1" elif not args.kvm: - os.environ['USE_LXC'] = '1' - if 'GITIAN_HOST_IP' not in os.environ.keys(): - os.environ['GITIAN_HOST_IP'] = '10.0.3.1' - if 'LXC_GUEST_IP' not in os.environ.keys(): - os.environ['LXC_GUEST_IP'] = '10.0.3.5' + os.environ["USE_LXC"] = "1" + if "GITIAN_HOST_IP" not in os.environ.keys(): + os.environ["GITIAN_HOST_IP"] = "10.0.3.1" + if "LXC_GUEST_IP" not in os.environ.keys(): + os.environ["LXC_GUEST_IP"] = "10.0.3.5" # Disable for MacOS if no SDK found if args.macos and not os.path.isfile( - 'gitian-builder/inputs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz'): - print('Cannot build for MacOS, SDK does not exist. Will build for other OSes') + "gitian-builder/inputs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz" + ): + print("Cannot build for MacOS, SDK does not exist. 
Will build for other OSes") args.macos = False script_name = os.path.basename(sys.argv[0]) # Signer and version shouldn't be empty - if args.signer == '': - print(script_name + ': Missing signer.') - print('Try ' + script_name + ' --help for more information') + if args.signer == "": + print(script_name + ": Missing signer.") + print("Try " + script_name + " --help for more information") exit(1) - if args.version == '': - print(script_name + ': Missing version.') - print('Try ' + script_name + ' --help for more information') + if args.version == "": + print(script_name + ": Missing version.") + print("Try " + script_name + " --help for more information") exit(1) # Add leading 'v' for tags if args.commit and args.pull: - raise Exception('Cannot have both commit and pull') - args.commit = ('' if args.commit else 'v') + args.version + raise Exception("Cannot have both commit and pull") + args.commit = ("" if args.commit else "v") + args.version if args.setup: setup() - os.chdir('bitcoin-abc') + os.chdir("bitcoin-abc") if args.pull: subprocess.check_call( - ['git', 'fetch', args.url, 'refs/pull/' + args.version + '/merge']) - os.chdir('../gitian-builder/inputs/bitcoin') + ["git", "fetch", args.url, "refs/pull/" + args.version + "/merge"] + ) + os.chdir("../gitian-builder/inputs/bitcoin") subprocess.check_call( - ['git', 'fetch', args.url, 'refs/pull/' + args.version + '/merge']) + ["git", "fetch", args.url, "refs/pull/" + args.version + "/merge"] + ) args.commit = subprocess.check_output( - ['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip() - args.version = 'pull-' + args.version + ["git", "show", "-s", "--format=%H", "FETCH_HEAD"], + universal_newlines=True, + encoding="utf8", + ).strip() + args.version = "pull-" + args.version print(args.commit) - subprocess.check_call(['git', 'fetch']) - subprocess.check_call(['git', 'checkout', args.commit]) + subprocess.check_call(["git", "fetch"]) + subprocess.check_call(["git", "checkout", args.commit]) os.chdir(workdir) if args.build: build() if args.sign: sign() if args.verify: verify() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py index 732f78a33..af1445acd 100755 --- a/contrib/linearize/linearize-data.py +++ b/contrib/linearize/linearize-data.py @@ -1,345 +1,358 @@ #!/usr/bin/env python3 # # linearize-data.py: Construct a linear, no-fork version of the chain. # # Copyright (c) 2013-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
# from __future__ import division, print_function import datetime import hashlib import os import os.path import re import struct import sys import time from binascii import unhexlify from collections import namedtuple from typing import Any, Dict settings: Dict[str, Any] = {} def hex_switchEndian(s): - """ Switches the endianness of a hex string (in pairs of hex chars) """ - pairList = [s[i:i + 2].encode() for i in range(0, len(s), 2)] - return b''.join(pairList[::-1]).decode() + """Switches the endianness of a hex string (in pairs of hex chars)""" + pairList = [s[i : i + 2].encode() for i in range(0, len(s), 2)] + return b"".join(pairList[::-1]).decode() def uint32(x): - return x & 0xffffffff + return x & 0xFFFFFFFF def bytereverse(x): - return uint32((((x) << 24) | (((x) << 8) & 0x00ff0000) | - (((x) >> 8) & 0x0000ff00) | ((x) >> 24))) + return uint32( + ( + ((x) << 24) + | (((x) << 8) & 0x00FF0000) + | (((x) >> 8) & 0x0000FF00) + | ((x) >> 24) + ) + ) def bufreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): - word = struct.unpack('@I', in_buf[i:i + 4])[0] - out_words.append(struct.pack('@I', bytereverse(word))) - return b''.join(out_words) + word = struct.unpack("@I", in_buf[i : i + 4])[0] + out_words.append(struct.pack("@I", bytereverse(word))) + return b"".join(out_words) def wordreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): - out_words.append(in_buf[i:i + 4]) + out_words.append(in_buf[i : i + 4]) out_words.reverse() - return b''.join(out_words) + return b"".join(out_words) def calc_hdr_hash(blk_hdr): hash1 = hashlib.sha256() hash1.update(blk_hdr) hash1_o = hash1.digest() hash2 = hashlib.sha256() hash2.update(hash1_o) hash2_o = hash2.digest() return hash2_o def calc_hash_str(blk_hdr): blockhash = calc_hdr_hash(blk_hdr) blockhash = bufreverse(blockhash) blockhash = wordreverse(blockhash) hash_str = blockhash.hex() return hash_str def get_blk_dt(blk_hdr): - members = struct.unpack(" self.maxOutSz): + if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz): self.outF.close() if self.setFileTime: os.utime(self.outFname, (int(time.time()), self.highTS)) self.outF = None self.outFname = None self.outFn = self.outFn + 1 self.outsz = 0 (blkDate, blkTS) = get_blk_dt(blk_hdr) if self.timestampSplit and (blkDate > self.lastDate): - print("New month " + blkDate.strftime("%Y-%m") + - " @ " + self.hash_str) + print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str) self.lastDate = blkDate if self.outF: self.outF.close() if self.setFileTime: os.utime(self.outFname, (int(time.time()), self.highTS)) self.outF = None self.outFname = None self.outFn = self.outFn + 1 self.outsz = 0 if not self.outF: if self.fileOutput: - self.outFname = self.settings['output_file'] + self.outFname = self.settings["output_file"] else: self.outFname = os.path.join( - self.settings['output'], f"blk{self.outFn:05d}.dat") + self.settings["output"], f"blk{self.outFn:05d}.dat" + ) print("Output file " + self.outFname) self.outF = open(self.outFname, "wb") self.outF.write(inhdr) self.outF.write(blk_hdr) self.outF.write(rawblock) self.outsz = self.outsz + len(inhdr) + len(blk_hdr) + len(rawblock) self.blkCountOut = self.blkCountOut + 1 if blkTS > self.highTS: self.highTS = blkTS if (self.blkCountOut % 1000) == 0: - print('{} blocks scanned, {} blocks written (of {}, {:.1f}% complete)'.format( - self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex))) + print( + "{} blocks scanned, {} blocks written (of {}, 
{:.1f}% complete)".format( + self.blkCountIn, + self.blkCountOut, + len(self.blkindex), + 100.0 * self.blkCountOut / len(self.blkindex), + ) + ) def inFileName(self, fn): - return os.path.join(self.settings['input'], f"blk{fn:05d}.dat") + return os.path.join(self.settings["input"], f"blk{fn:05d}.dat") def fetchBlock(self, extent): - '''Fetch block contents from disk given extents''' + """Fetch block contents from disk given extents""" with open(self.inFileName(extent.fn), "rb") as f: f.seek(extent.offset) return f.read(extent.size) def copyOneBlock(self): - '''Find the next block to be written in the input, and copy it to the output.''' + """Find the next block to be written in the input, and copy it to the output.""" extent = self.blockExtents.pop(self.blkCountOut) if self.blkCountOut in self.outOfOrderData: # If the data is cached, use it from memory and remove from the # cache rawblock = self.outOfOrderData.pop(self.blkCountOut) self.outOfOrderSize -= len(rawblock) else: # Otherwise look up data on disk rawblock = self.fetchBlock(extent) self.writeBlock(extent.inhdr, extent.blkhdr, rawblock) def run(self): while self.blkCountOut < len(self.blkindex): if not self.inF: fname = self.inFileName(self.inFn) print("Input file " + fname) try: self.inF = open(fname, "rb") except IOError: print("Premature end of block data") return inhdr = self.inF.read(8) - if (not inhdr or (inhdr[0] == "\0")): + if not inhdr or (inhdr[0] == "\0"): self.inF.close() self.inF = None self.inFn = self.inFn + 1 continue inMagic = inhdr[:4] - if (inMagic != self.settings['netmagic']): + if inMagic != self.settings["netmagic"]: print("Invalid magic: " + inMagic.hex()) return inLenLE = inhdr[4:] su = struct.unpack(" # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# import os import os.path import re import shutil import stat import subprocess import sys import time from argparse import ArgumentParser from string import Template from typing import List, Optional # This is ported from the original macdeployqt with modifications class FrameworkInfo(object): def __init__(self): self.frameworkDirectory = "" self.frameworkName = "" self.frameworkPath = "" self.binaryDirectory = "" self.binaryName = "" self.binaryPath = "" self.version = "" self.installName = "" self.deployedInstallName = "" self.sourceFilePath = "" self.destinationDirectory = "" self.sourceResourcesDirectory = "" self.sourceVersionContentsDirectory = "" self.sourceContentsDirectory = "" self.destinationResourcesDirectory = "" self.destinationVersionContentsDirectory = "" def __eq__(self, other): if self.__class__ == other.__class__: return self.__dict__ == other.__dict__ else: return False def __str__(self): return f""" Framework name: {self.frameworkName} Framework directory: {self.frameworkDirectory} Framework path: {self.frameworkPath} Binary name: {self.binaryName} Binary directory: {self.binaryDirectory} Binary path: {self.binaryPath} Version: {self.version} Install name: {self.installName} Deployed install name: {self.deployedInstallName} Source file Path: {self.sourceFilePath} Deployed Directory (relative to bundle): {self.destinationDirectory} """ def isDylib(self): return self.frameworkName.endswith(".dylib") def isQtFramework(self): if self.isDylib(): return self.frameworkName.startswith("libQt") else: return self.frameworkName.startswith("Qt") reOLine = re.compile( - r'^(.+) \(compatibility version [0-9.]+, current version [0-9.]+\)$') + r"^(.+) \(compatibility version [0-9.]+, current version [0-9.]+\)$" + ) bundleFrameworkDirectory = "Contents/Frameworks" bundleBinaryDirectory = "Contents/MacOS" @classmethod - def fromOtoolLibraryLine(cls, line: str) -> Optional['FrameworkInfo']: + def fromOtoolLibraryLine(cls, line: str) -> Optional["FrameworkInfo"]: # Note: line must be trimmed if line == "": return None # Don't deploy system libraries (exception for libQtuitools and # libQtlucene). 
- if line.startswith("/System/Library/") or line.startswith( - "@executable_path") or (line.startswith("/usr/lib/") and "libQt" not in line): + if ( + line.startswith("/System/Library/") + or line.startswith("@executable_path") + or (line.startswith("/usr/lib/") and "libQt" not in line) + ): return None m = cls.reOLine.match(line) if m is None: raise RuntimeError("otool line could not be parsed: " + line) path = m.group(1) info = cls() info.sourceFilePath = path info.installName = path if path.endswith(".dylib"): dirname, filename = os.path.split(path) info.frameworkName = filename info.frameworkDirectory = dirname info.frameworkPath = path info.binaryDirectory = dirname info.binaryName = filename info.binaryPath = path info.version = "-" info.installName = path - info.deployedInstallName = "@executable_path/../Frameworks/" + info.binaryName + info.deployedInstallName = ( + "@executable_path/../Frameworks/" + info.binaryName + ) info.sourceFilePath = path info.destinationDirectory = cls.bundleFrameworkDirectory else: parts = path.split("/") i = 0 # Search for the .framework directory for part in parts: if part.endswith(".framework"): break i += 1 if i == len(parts): raise RuntimeError( - "Could not find .framework or .dylib in otool line: " + line) + "Could not find .framework or .dylib in otool line: " + line + ) info.frameworkName = parts[i] info.frameworkDirectory = "/".join(parts[:i]) info.frameworkPath = os.path.join( - info.frameworkDirectory, info.frameworkName) + info.frameworkDirectory, info.frameworkName + ) info.binaryName = parts[i + 3] - info.binaryDirectory = "/".join(parts[i + 1:i + 3]) - info.binaryPath = os.path.join( - info.binaryDirectory, info.binaryName) + info.binaryDirectory = "/".join(parts[i + 1 : i + 3]) + info.binaryPath = os.path.join(info.binaryDirectory, info.binaryName) info.version = parts[i + 2] - info.deployedInstallName = "@executable_path/../Frameworks/" + \ - os.path.join(info.frameworkName, info.binaryPath) + info.deployedInstallName = "@executable_path/../Frameworks/" + os.path.join( + info.frameworkName, info.binaryPath + ) info.destinationDirectory = os.path.join( - cls.bundleFrameworkDirectory, info.frameworkName, info.binaryDirectory) + cls.bundleFrameworkDirectory, info.frameworkName, info.binaryDirectory + ) info.sourceResourcesDirectory = os.path.join( - info.frameworkPath, "Resources") - info.sourceContentsDirectory = os.path.join( - info.frameworkPath, "Contents") + info.frameworkPath, "Resources" + ) + info.sourceContentsDirectory = os.path.join(info.frameworkPath, "Contents") info.sourceVersionContentsDirectory = os.path.join( - info.frameworkPath, "Versions", info.version, "Contents") + info.frameworkPath, "Versions", info.version, "Contents" + ) info.destinationResourcesDirectory = os.path.join( - cls.bundleFrameworkDirectory, info.frameworkName, "Resources") + cls.bundleFrameworkDirectory, info.frameworkName, "Resources" + ) info.destinationVersionContentsDirectory = os.path.join( cls.bundleFrameworkDirectory, info.frameworkName, "Versions", info.version, - "Contents") + "Contents", + ) return info class ApplicationBundleInfo(object): def __init__(self, path: str): self.path = path appName = "BitcoinABC-Qt" self.binaryPath = os.path.join(path, "Contents", "MacOS", appName) if not os.path.exists(self.binaryPath): raise RuntimeError("Could not find bundle binary for " + path) self.resourcesPath = os.path.join(path, "Contents", "Resources") self.pluginPath = os.path.join(path, "Contents", "PlugIns") class DeploymentInfo(object): def 
__init__(self): self.qtPath = None self.pluginPath = None self.deployedFrameworks = [] def detectQtPath(self, frameworkDirectory: str): parentDir = os.path.dirname(frameworkDirectory) if os.path.exists(os.path.join(parentDir, "translations")): # Classic layout, e.g. "/usr/local/Trolltech/Qt-4.x.x" self.qtPath = parentDir else: self.qtPath = os.getenv("QTDIR", None) if self.qtPath is not None: pluginPath = os.path.join(self.qtPath, "plugins") if os.path.exists(pluginPath): self.pluginPath = pluginPath def usesFramework(self, name: str) -> bool: nameDot = f"{name}." libNameDot = f"lib{name}." for framework in self.deployedFrameworks: if framework.endswith(".framework"): if framework.startswith(nameDot): return True elif framework.endswith(".dylib"): if framework.startswith(libNameDot): return True return False def getFrameworks(binaryPath: str, verbose: int) -> List[FrameworkInfo]: if verbose >= 3: print("Inspecting with otool: " + binaryPath) otoolbin = os.getenv("OTOOL", "otool") - otool = subprocess.Popen([otoolbin, - "-L", - binaryPath], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) + otool = subprocess.Popen( + [otoolbin, "-L", binaryPath], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) o_stdout, o_stderr = otool.communicate() if otool.returncode != 0: if verbose >= 1: sys.stderr.write(o_stderr) sys.stderr.flush() - raise RuntimeError( - f"otool failed with return code {otool.returncode}") + raise RuntimeError(f"otool failed with return code {otool.returncode}") otoolLines = o_stdout.split("\n") otoolLines.pop(0) # First line is the inspected binary if ".framework" in binaryPath or binaryPath.endswith(".dylib"): # Frameworks and dylibs list themselves as a dependency. otoolLines.pop(0) libraries = [] for line in otoolLines: line = line.replace("@loader_path", os.path.dirname(binaryPath)) info = FrameworkInfo.fromOtoolLibraryLine(line.strip()) if info is not None: if verbose >= 3: print("Found framework:") print(info) libraries.append(info) return libraries def runInstallNameTool(action: str, *args): installnametoolbin = os.getenv("INSTALLNAMETOOL", "install_name_tool") subprocess.check_call([installnametoolbin, "-" + action] + list(args)) -def changeInstallName(oldName: str, newName: str, - binaryPath: str, verbose: int): +def changeInstallName(oldName: str, newName: str, binaryPath: str, verbose: int): if verbose >= 3: print("Using install_name_tool:") print(" in", binaryPath) print(" change reference", oldName) print(" to", newName) runInstallNameTool("change", oldName, newName, binaryPath) def changeIdentification(id_name: str, binaryPath: str, verbose: int): if verbose >= 3: print("Using install_name_tool:") print(" change identification in", binaryPath) print(" to", id_name) runInstallNameTool("id", id_name, binaryPath) def runStrip(binaryPath: str, verbose: int): stripbin = os.getenv("STRIP", "strip") if verbose >= 3: print("Using strip:") print(" stripped", binaryPath) subprocess.check_call([stripbin, "-x", binaryPath]) -def copyFramework(framework: FrameworkInfo, path: str, - verbose: int) -> Optional[str]: +def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional[str]: if framework.sourceFilePath.startswith("Qt"): # standard place for Nokia Qt installer's frameworks fromPath = "/Library/Frameworks/" + framework.sourceFilePath else: fromPath = framework.sourceFilePath toDir = os.path.join(path, framework.destinationDirectory) toPath = os.path.join(toDir, framework.binaryName) if not 
os.path.exists(fromPath): raise RuntimeError("No file at " + fromPath) if os.path.exists(toPath): return None # Already there if not os.path.exists(toDir): os.makedirs(toDir) shutil.copy2(fromPath, toPath) if verbose >= 3: print("Copied:", fromPath) print(" to:", toPath) permissions = os.stat(toPath) if not permissions.st_mode & stat.S_IWRITE: os.chmod(toPath, permissions.st_mode | stat.S_IWRITE) if not framework.isDylib(): # Copy resources for real frameworks - linkfrom = os.path.join( path, "Contents", "Frameworks", framework.frameworkName, "Versions", - "Current") + "Current", + ) linkto = framework.version if not os.path.exists(linkfrom): os.symlink(linkto, linkfrom) if verbose >= 2: print("Linked:", linkfrom, "->", linkto) fromResourcesDir = framework.sourceResourcesDirectory if os.path.exists(fromResourcesDir): - toResourcesDir = os.path.join( - path, framework.destinationResourcesDirectory) + toResourcesDir = os.path.join(path, framework.destinationResourcesDirectory) shutil.copytree(fromResourcesDir, toResourcesDir, symlinks=True) if verbose >= 3: print("Copied resources:", fromResourcesDir) print(" to:", toResourcesDir) fromContentsDir = framework.sourceVersionContentsDirectory if not os.path.exists(fromContentsDir): fromContentsDir = framework.sourceContentsDirectory if os.path.exists(fromContentsDir): toContentsDir = os.path.join( - path, framework.destinationVersionContentsDirectory) + path, framework.destinationVersionContentsDirectory + ) shutil.copytree(fromContentsDir, toContentsDir, symlinks=True) if verbose >= 3: print("Copied Contents:", fromContentsDir) print(" to:", toContentsDir) # Copy qt_menu.nib (applies to non-framework layout) elif framework.frameworkName.startswith("libQtGui"): qtMenuNibSourcePath = os.path.join( - framework.frameworkDirectory, "Resources", "qt_menu.nib") + framework.frameworkDirectory, "Resources", "qt_menu.nib" + ) qtMenuNibDestinationPath = os.path.join( - path, "Contents", "Resources", "qt_menu.nib") + path, "Contents", "Resources", "qt_menu.nib" + ) if os.path.exists(qtMenuNibSourcePath) and not os.path.exists( - qtMenuNibDestinationPath): + qtMenuNibDestinationPath + ): shutil.copytree( - qtMenuNibSourcePath, - qtMenuNibDestinationPath, - symlinks=True) + qtMenuNibSourcePath, qtMenuNibDestinationPath, symlinks=True + ) if verbose >= 3: print("Copied for libQtGui:", qtMenuNibSourcePath) print(" to:", qtMenuNibDestinationPath) return toPath -def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, - verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo: +def deployFrameworks( + frameworks: List[FrameworkInfo], + bundlePath: str, + binaryPath: str, + strip: bool, + verbose: int, + deploymentInfo: Optional[DeploymentInfo] = None, +) -> DeploymentInfo: if deploymentInfo is None: deploymentInfo = DeploymentInfo() while len(frameworks) > 0: framework = frameworks.pop(0) deploymentInfo.deployedFrameworks.append(framework.frameworkName) if verbose >= 2: print("Processing", framework.frameworkName, "...") # Get the Qt path from one of the Qt frameworks if deploymentInfo.qtPath is None and framework.isQtFramework(): deploymentInfo.detectQtPath(framework.frameworkDirectory) if framework.installName.startswith( - "@executable_path") or framework.installName.startswith(bundlePath): + "@executable_path" + ) or framework.installName.startswith(bundlePath): if verbose >= 2: print(framework.frameworkName, "already deployed, skipping.") continue # install_name_tool the new id into the 
binary changeInstallName( - framework.installName, - framework.deployedInstallName, - binaryPath, - verbose) + framework.installName, framework.deployedInstallName, binaryPath, verbose + ) # Copy framework to app bundle. deployedBinaryPath = copyFramework(framework, bundlePath, verbose) # Skip the rest if already was deployed. if deployedBinaryPath is None: continue if strip: runStrip(deployedBinaryPath, verbose) # install_name_tool it a new id. - changeIdentification( - framework.deployedInstallName, - deployedBinaryPath, - verbose) + changeIdentification(framework.deployedInstallName, deployedBinaryPath, verbose) # Check for framework dependencies dependencies = getFrameworks(deployedBinaryPath, verbose) for dependency in dependencies: changeInstallName( dependency.installName, dependency.deployedInstallName, deployedBinaryPath, - verbose) + verbose, + ) # Deploy framework if necessary. - if dependency.frameworkName not in deploymentInfo.deployedFrameworks and dependency not in frameworks: + if ( + dependency.frameworkName not in deploymentInfo.deployedFrameworks + and dependency not in frameworks + ): frameworks.append(dependency) return deploymentInfo def deployFrameworksForAppBundle( - applicationBundle: ApplicationBundleInfo, strip: bool, verbose: int) -> DeploymentInfo: + applicationBundle: ApplicationBundleInfo, strip: bool, verbose: int +) -> DeploymentInfo: frameworks = getFrameworks(applicationBundle.binaryPath, verbose) if len(frameworks) == 0 and verbose >= 1: print( "Warning: Could not find any external frameworks to deploy in {}.".format( - applicationBundle.path)) + applicationBundle.path + ) + ) return DeploymentInfo() else: return deployFrameworks( - frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose) - - -def deployPlugins(appBundleInfo: ApplicationBundleInfo, - deploymentInfo: DeploymentInfo, strip: bool, verbose: int): + frameworks, + applicationBundle.path, + applicationBundle.binaryPath, + strip, + verbose, + ) + + +def deployPlugins( + appBundleInfo: ApplicationBundleInfo, + deploymentInfo: DeploymentInfo, + strip: bool, + verbose: int, +): # Lookup available plugins, exclude unneeded plugins = [] if deploymentInfo.pluginPath is None: return for dirpath, dirnames, filenames in os.walk(deploymentInfo.pluginPath): pluginDirectory = os.path.relpath(dirpath, deploymentInfo.pluginPath) if pluginDirectory == "designer": # Skip designer plugins continue elif pluginDirectory == "printsupport": # Skip printsupport plugins continue elif pluginDirectory == "imageformats": # Skip imageformats plugins continue elif pluginDirectory == "sqldrivers": # Deploy the sql plugins only if QtSql is in use if not deploymentInfo.usesFramework("QtSql"): continue elif pluginDirectory == "script": # Deploy the script plugins only if QtScript is in use if not deploymentInfo.usesFramework("QtScript"): continue elif pluginDirectory == "qmltooling" or pluginDirectory == "qml1tooling": # Deploy the qml plugins only if QtDeclarative is in use if not deploymentInfo.usesFramework("QtDeclarative"): continue elif pluginDirectory == "bearer": # Deploy the bearer plugins only if QtNetwork is in use if not deploymentInfo.usesFramework("QtNetwork"): continue elif pluginDirectory == "position": # Deploy the position plugins only if QtPositioning is in use if not deploymentInfo.usesFramework("QtPositioning"): continue elif pluginDirectory == "sensors" or pluginDirectory == "sensorgestures": # Deploy the sensor plugins only if QtSensors is in use if not 
deploymentInfo.usesFramework("QtSensors"): continue elif pluginDirectory == "audio" or pluginDirectory == "playlistformats": # Deploy the audio plugins only if QtMultimedia is in use if not deploymentInfo.usesFramework("QtMultimedia"): continue elif pluginDirectory == "mediaservice": # Deploy the mediaservice plugins only if QtMultimediaWidgets is in # use if not deploymentInfo.usesFramework("QtMultimediaWidgets"): continue elif pluginDirectory == "canbus": # Deploy the canbus plugins only if QtSerialBus is in use if not deploymentInfo.usesFramework("QtSerialBus"): continue elif pluginDirectory == "webview": # Deploy the webview plugins only if QtWebView is in use if not deploymentInfo.usesFramework("QtWebView"): continue elif pluginDirectory == "gamepads": # Deploy the webview plugins only if QtGamepad is in use if not deploymentInfo.usesFramework("QtGamepad"): continue elif pluginDirectory == "geoservices": # Deploy the webview plugins only if QtLocation is in use if not deploymentInfo.usesFramework("QtLocation"): continue elif pluginDirectory == "texttospeech": # Deploy the texttospeech plugins only if QtTextToSpeech is in use if not deploymentInfo.usesFramework("QtTextToSpeech"): continue elif pluginDirectory == "virtualkeyboard": # Deploy the virtualkeyboard plugins only if QtVirtualKeyboard is # in use if not deploymentInfo.usesFramework("QtVirtualKeyboard"): continue elif pluginDirectory == "sceneparsers": # Deploy the virtualkeyboard plugins only if Qt3DCore is in use if not deploymentInfo.usesFramework("Qt3DCore"): continue elif pluginDirectory == "renderplugins": # Deploy the renderplugins plugins only if Qt3DCore is in use if not deploymentInfo.usesFramework("Qt3DCore"): continue elif pluginDirectory == "geometryloaders": # Deploy the geometryloaders plugins only if Qt3DCore is in use if not deploymentInfo.usesFramework("Qt3DCore"): continue for pluginName in filenames: pluginPath = os.path.join(pluginDirectory, pluginName) if pluginName.endswith("_debug.dylib"): # Skip debug plugins continue - elif pluginPath == "imageformats/libqsvg.dylib" or pluginPath == "iconengines/libqsvgicon.dylib": + elif ( + pluginPath == "imageformats/libqsvg.dylib" + or pluginPath == "iconengines/libqsvgicon.dylib" + ): # Deploy the svg plugins only if QtSvg is in use if not deploymentInfo.usesFramework("QtSvg"): continue elif pluginPath == "accessible/libqtaccessiblecompatwidgets.dylib": # Deploy accessibility for Qt3Support only if the Qt3Support is # in use if not deploymentInfo.usesFramework("Qt3Support"): continue elif pluginPath == "graphicssystems/libqglgraphicssystem.dylib": # Deploy the opengl graphicssystem plugin only if QtOpenGL is # in use if not deploymentInfo.usesFramework("QtOpenGL"): continue elif pluginPath == "accessible/libqtaccessiblequick.dylib": # Deploy the accessible qtquick plugin only if QtQuick is in # use if not deploymentInfo.usesFramework("QtQuick"): continue elif pluginPath == "platforminputcontexts/libqtvirtualkeyboardplugin.dylib": # Deploy the virtualkeyboardplugin plugin only if # QtVirtualKeyboard is in use if not deploymentInfo.usesFramework("QtVirtualKeyboard"): continue plugins.append((pluginDirectory, pluginName)) for pluginDirectory, pluginName in plugins: if verbose >= 2: - print( - "Processing plugin", - os.path.join( - pluginDirectory, - pluginName), - "...") + print("Processing plugin", os.path.join(pluginDirectory, pluginName), "...") sourcePath = os.path.join( - deploymentInfo.pluginPath, - pluginDirectory, - pluginName) - destinationDirectory = 
os.path.join( - appBundleInfo.pluginPath, pluginDirectory) + deploymentInfo.pluginPath, pluginDirectory, pluginName + ) + destinationDirectory = os.path.join(appBundleInfo.pluginPath, pluginDirectory) if not os.path.exists(destinationDirectory): os.makedirs(destinationDirectory) destinationPath = os.path.join(destinationDirectory, pluginName) shutil.copy2(sourcePath, destinationPath) if verbose >= 3: print("Copied:", sourcePath) print(" to:", destinationPath) if strip: runStrip(destinationPath, verbose) dependencies = getFrameworks(destinationPath, verbose) for dependency in dependencies: changeInstallName( dependency.installName, dependency.deployedInstallName, destinationPath, - verbose) + verbose, + ) # Deploy framework if necessary. if dependency.frameworkName not in deploymentInfo.deployedFrameworks: deployFrameworks( [dependency], appBundleInfo.path, destinationPath, strip, verbose, - deploymentInfo) + deploymentInfo, + ) qt_conf = """[Paths] Translations=Resources Plugins=PlugIns """ -ap = ArgumentParser(description="""Improved version of macdeployqt. +ap = ArgumentParser( + description="""Improved version of macdeployqt. Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .dmg file. Note that the "dist" folder will be deleted before deploying on each run. Optionally, Qt translation files (.qm) and additional resources can be added to the bundle. Also optionally signs the .app bundle; set the CODESIGNARGS environment variable to pass arguments to the codesign tool. -E.g. CODESIGNARGS='--sign "Developer ID Application: ..." --keychain /encrypted/foo.keychain'""") +E.g. CODESIGNARGS='--sign "Developer ID Application: ..." --keychain /encrypted/foo.keychain'""" +) -ap.add_argument("app_bundle", nargs=1, metavar="app-bundle", - help="application bundle to be deployed") +ap.add_argument( + "app_bundle", + nargs=1, + metavar="app-bundle", + help="application bundle to be deployed", +) ap.add_argument( "-verbose", type=int, nargs=1, default=[1], metavar="<0-3>", - help="0 = no output, 1 = error/warning (default), 2 = normal, 3 = debug") + help="0 = no output, 1 = error/warning (default), 2 = normal, 3 = debug", +) ap.add_argument( "-no-plugins", dest="plugins", action="store_false", default=True, - help="skip plugin deployment") + help="skip plugin deployment", +) ap.add_argument( "-no-strip", dest="strip", action="store_false", default=True, - help="don't run 'strip' on the binaries") + help="don't run 'strip' on the binaries", +) ap.add_argument( "-sign", dest="sign", action="store_true", default=False, - help="sign .app bundle with codesign tool") + help="sign .app bundle with codesign tool", +) ap.add_argument( "-dmg", nargs="?", const="", metavar="basename", - help="create a .dmg disk image; if basename is not specified, a camel-cased version of the app name is used") + help=( + "create a .dmg disk image; if basename is not specified, a camel-cased version" + " of the app name is used" + ), +) ap.add_argument( "-fancy", nargs=1, metavar="plist", default=[], - help="make a fancy looking disk image using the given plist file with instructions; requires -dmg to work") + help=( + "make a fancy looking disk image using the given plist file with instructions;" + " requires -dmg to work" + ), +) ap.add_argument( "-add-qt-tr", nargs=1, metavar="languages", default=[], - help="add Qt translation files to the bundle's resources; the language list must be separated with commas, not with whitespace") + help=( + "add Qt translation files to the bundle's resources; the
language list must be" + " separated with commas, not with whitespace" + ), +) ap.add_argument( "-translations-dir", nargs=1, metavar="path", default=None, - help="Path to Qt's translation files") + help="Path to Qt's translation files", +) ap.add_argument( "-add-resources", nargs="+", metavar="path", default=[], - help="list of additional files or folders to be copied into the bundle's resources; must be the last argument") + help=( + "list of additional files or folders to be copied into the bundle's resources;" + " must be the last argument" + ), +) ap.add_argument( "-volname", nargs=1, metavar="volname", default=[], - help="custom volume name for dmg") + help="custom volume name for dmg", +) config = ap.parse_args() verbose = config.verbose[0] # ------------------------------------------------ app_bundle = config.app_bundle[0] if not os.path.exists(app_bundle): if verbose >= 1: - sys.stderr.write( - f"Error: Could not find app bundle \"{app_bundle}\"\n") + sys.stderr.write(f'Error: Could not find app bundle "{app_bundle}"\n') sys.exit(1) app_bundle_name = os.path.splitext(os.path.basename(app_bundle))[0] # ------------------------------------------------ translations_dir = None if config.translations_dir and config.translations_dir[0]: if os.path.exists(config.translations_dir[0]): translations_dir = config.translations_dir[0] else: if verbose >= 1: sys.stderr.write( - f"Error: Could not find translation dir \"{translations_dir}\"\n") + f'Error: Could not find translation dir "{translations_dir}"\n' + ) sys.exit(1) # ------------------------------------------------ for p in config.add_resources: if verbose >= 3: - print(f"Checking for \"{p}\"...") + print(f'Checking for "{p}"...') if not os.path.exists(p): if verbose >= 1: - sys.stderr.write( - f"Error: Could not find additional resource file \"{p}\"\n") + sys.stderr.write(f'Error: Could not find additional resource file "{p}"\n') sys.exit(1) # ------------------------------------------------ if len(config.fancy) == 1: if verbose >= 3: print("Fancy: Importing plistlib...") try: import plistlib except ImportError: if verbose >= 1: sys.stderr.write( - "Error: Could not import plistlib which is required for fancy disk images.\n") + "Error: Could not import plistlib which is required for fancy disk" + " images.\n" + ) sys.exit(1) p = config.fancy[0] if verbose >= 3: - print(f"Fancy: Loading \"{p}\"...") + print(f'Fancy: Loading "{p}"...') if not os.path.exists(p): if verbose >= 1: - sys.stderr.write( - f"Error: Could not find fancy disk image plist at \"{p}\"\n") + sys.stderr.write(f'Error: Could not find fancy disk image plist at "{p}"\n') sys.exit(1) try: fancy = plistlib.readPlist(p) except BaseException: if verbose >= 1: sys.stderr.write( - f"Error: Could not parse fancy disk image plist at \"{p}\"\n") + f'Error: Could not parse fancy disk image plist at "{p}"\n' + ) sys.exit(1) try: assert "window_bounds" not in fancy or ( - isinstance( - fancy["window_bounds"], - list) and len( - fancy["window_bounds"]) == 4) + isinstance(fancy["window_bounds"], list) + and len(fancy["window_bounds"]) == 4 + ) assert "background_picture" not in fancy or isinstance( - fancy["background_picture"], str) + fancy["background_picture"], str + ) assert "icon_size" not in fancy or isinstance(fancy["icon_size"], int) assert "applications_symlink" not in fancy or isinstance( - fancy["applications_symlink"], bool) + fancy["applications_symlink"], bool + ) if "items_position" in fancy: assert isinstance(fancy["items_position"], dict) for key, value in 
fancy["items_position"].items(): - assert isinstance( - value, - list) and len(value) == 2 and isinstance( - value[0], - int) and isinstance( - value[1], - int) + assert ( + isinstance(value, list) + and len(value) == 2 + and isinstance(value[0], int) + and isinstance(value[1], int) + ) except BaseException: if verbose >= 1: - sys.stderr.write( - f"Error: Bad format of fancy disk image plist at \"{p}\"\n") + sys.stderr.write(f'Error: Bad format of fancy disk image plist at "{p}"\n') sys.exit(1) if "background_picture" in fancy: bp = fancy["background_picture"] if verbose >= 3: - print(f"Fancy: Resolving background picture \"{bp}\"...") + print(f'Fancy: Resolving background picture "{bp}"...') if not os.path.exists(bp): bp = os.path.join(os.path.dirname(p), bp) if not os.path.exists(bp): if verbose >= 1: sys.stderr.write( - "Error: Could not find background picture at \"{}\" or \"{}\"\n".format( - fancy["background_picture"], bp)) + 'Error: Could not find background picture at "{}" or "{}"\n' + .format(fancy["background_picture"], bp) + ) sys.exit(1) else: fancy["background_picture"] = bp else: fancy = None # ------------------------------------------------ if os.path.exists("dist"): if verbose >= 2: print("+ Removing old dist folder +") shutil.rmtree("dist") # ------------------------------------------------ if len(config.volname) == 1: volname = config.volname[0] else: volname = app_bundle_name # ------------------------------------------------ target = os.path.join("dist", "BitcoinABC-Qt.app") if verbose >= 2: print("+ Copying source bundle +") if verbose >= 3: print(app_bundle, "->", target) os.mkdir("dist") shutil.copytree(app_bundle, target, symlinks=True) applicationBundle = ApplicationBundleInfo(target) # ------------------------------------------------ if verbose >= 2: print("+ Deploying frameworks +") try: deploymentInfo = deployFrameworksForAppBundle( - applicationBundle, config.strip, verbose) + applicationBundle, config.strip, verbose + ) if deploymentInfo.qtPath is None: deploymentInfo.qtPath = os.getenv("QTDIR", None) if deploymentInfo.qtPath is None: if verbose >= 1: sys.stderr.write( - "Warning: Could not detect Qt's path, skipping plugin deployment!\n") + "Warning: Could not detect Qt's path, skipping plugin deployment!\n" + ) config.plugins = False except RuntimeError as e: if verbose >= 1: sys.stderr.write(f"Error: {str(e)}\n") sys.exit(1) # ------------------------------------------------ if config.plugins: if verbose >= 2: print("+ Deploying plugins +") try: deployPlugins(applicationBundle, deploymentInfo, config.strip, verbose) except RuntimeError as e: if verbose >= 1: sys.stderr.write(f"Error: {str(e)}\n") sys.exit(1) # ------------------------------------------------ if len(config.add_qt_tr) == 0: add_qt_tr = [] else: if translations_dir is not None: qt_tr_dir = translations_dir else: if deploymentInfo.qtPath is not None: qt_tr_dir = os.path.join(deploymentInfo.qtPath, "translations") else: sys.stderr.write("Error: Could not find Qt translation path\n") sys.exit(1) - add_qt_tr = [f"qt_{lng}.qm" - for lng in config.add_qt_tr[0].split(",")] + add_qt_tr = [f"qt_{lng}.qm" for lng in config.add_qt_tr[0].split(",")] for lng_file in add_qt_tr: p = os.path.join(qt_tr_dir, lng_file) if verbose >= 3: - print(f"Checking for \"{p}\"...") + print(f'Checking for "{p}"...') if not os.path.exists(p): if verbose >= 1: sys.stderr.write( - f"Error: Could not find Qt translation file \"{lng_file}\"\n") + f'Error: Could not find Qt translation file "{lng_file}"\n' + ) sys.exit(1) # 
------------------------------------------------ if verbose >= 2: print("+ Installing qt.conf +") with open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") as f: f.write(qt_conf.encode()) # ------------------------------------------------ if len(add_qt_tr) > 0 and verbose >= 2: print("+ Adding Qt translations +") for lng_file in add_qt_tr: if verbose >= 3: print( - os.path.join( - qt_tr_dir, - lng_file), + os.path.join(qt_tr_dir, lng_file), "->", - os.path.join( - applicationBundle.resourcesPath, - lng_file)) + os.path.join(applicationBundle.resourcesPath, lng_file), + ) shutil.copy2( - os.path.join( - qt_tr_dir, lng_file), os.path.join( - applicationBundle.resourcesPath, lng_file)) + os.path.join(qt_tr_dir, lng_file), + os.path.join(applicationBundle.resourcesPath, lng_file), + ) # ------------------------------------------------ if len(config.add_resources) > 0 and verbose >= 2: print("+ Adding additional resources +") for p in config.add_resources: t = os.path.join(applicationBundle.resourcesPath, os.path.basename(p)) if verbose >= 3: print(p, "->", t) if os.path.isdir(p): shutil.copytree(p, t, symlinks=True) else: shutil.copy2(p, t) # ------------------------------------------------ -if config.sign and 'CODESIGNARGS' not in os.environ: +if config.sign and "CODESIGNARGS" not in os.environ: print("You must set the CODESIGNARGS environment variable. Skipping signing.") elif config.sign: if verbose >= 1: print(f"Code-signing app bundle {target}") subprocess.check_call( - f"codesign --force {os.environ['CODESIGNARGS']} {target}", shell=True) + f"codesign --force {os.environ['CODESIGNARGS']} {target}", shell=True + ) # ------------------------------------------------ if config.dmg is not None: def runHDIUtil(verb: str, image_basename: str, **kwargs) -> int: hdiutil_args = ["hdiutil", verb, image_basename + ".dmg"] if "capture_stdout" in kwargs: del kwargs["capture_stdout"] run = subprocess.check_output else: if verbose < 2: hdiutil_args.append("-quiet") elif verbose >= 3: hdiutil_args.append("-verbose") run = subprocess.check_call for key, value in kwargs.items(): hdiutil_args.append("-" + key) if value is not True: hdiutil_args.append(str(value)) return run(hdiutil_args, universal_newlines=True) if verbose >= 2: if fancy is None: print("+ Creating .dmg disk image +") else: print("+ Preparing .dmg disk image +") if config.dmg != "": dmg_name = config.dmg else: spl = app_bundle_name.split(" ") dmg_name = spl[0] + "".join(p.capitalize() for p in spl[1:]) if fancy is None: try: runHDIUtil( "create", dmg_name, srcfolder="dist", format="UDBZ", volname=volname, - ov=True) + ov=True, + ) except subprocess.CalledProcessError as e: sys.exit(e.returncode) else: if verbose >= 3: - print("Determining size of \"dist\"...") + print('Determining size of "dist"...') size = 0 for path, dirs, files in os.walk("dist"): for file in files: size += os.path.getsize(os.path.join(path, file)) size += int(size * 0.15) if verbose >= 3: print("Creating temp image for modification...") try: runHDIUtil( "create", dmg_name + ".temp", srcfolder="dist", format="UDRW", size=size, volname=volname, - ov=True) + ov=True, + ) except subprocess.CalledProcessError as e: sys.exit(e.returncode) if verbose >= 3: print("Attaching temp image...") try: output = runHDIUtil( "attach", dmg_name + ".temp", readwrite=True, noverify=True, noautoopen=True, - capture_stdout=True) + capture_stdout=True, + ) except subprocess.CalledProcessError as e: sys.exit(e.returncode) m = re.search("/Volumes/(.+$)", output) disk_root = 
m.group(0) disk_name = m.group(1) if verbose >= 2: print("+ Applying fancy settings +") if "background_picture" in fancy: bg_path = os.path.join( - disk_root, ".background", os.path.basename( - fancy["background_picture"])) + disk_root, ".background", os.path.basename(fancy["background_picture"]) + ) os.mkdir(os.path.dirname(bg_path)) if verbose >= 3: print(fancy["background_picture"], "->", bg_path) shutil.copy2(fancy["background_picture"], bg_path) else: bg_path = None if fancy.get("applications_symlink", False): - os.symlink( - "/Applications", - os.path.join( - disk_root, - "Applications")) + os.symlink("/Applications", os.path.join(disk_root, "Applications")) # The Python appscript package broke with OSX 10.8 and isn't being fixed. # So we now build up an AppleScript string and use the osascript command # to make the .dmg file pretty: appscript = Template(""" on run argv tell application "Finder" tell disk "$disk" open set current view of container window to icon view set toolbar visible of container window to false set statusbar visible of container window to false set the bounds of container window to {$window_bounds} set theViewOptions to the icon view options of container window set arrangement of theViewOptions to not arranged set icon size of theViewOptions to $icon_size $background_commands $items_positions close -- close/reopen works around a bug... open update without registering applications delay 5 eject end tell end tell end run """) itemscript = Template( - 'set position of item "${item}" of container window to {${position}}') + 'set position of item "${item}" of container window to {${position}}' + ) items_positions = [] if "items_position" in fancy: for name, position in fancy["items_position"].items(): - params = {"item": name, "position": ",".join( - [str(p) for p in position])} + params = { + "item": name, + "position": ",".join([str(p) for p in position]), + } items_positions.append(itemscript.substitute(params)) params = { "disk": volname, "window_bounds": "300,300,800,620", "icon_size": "96", "background_commands": "", - "items_positions": "\n ".join(items_positions) + "items_positions": "\n ".join(items_positions), } if "window_bounds" in fancy: - params["window_bounds"] = ",".join( - [str(p) for p in fancy["window_bounds"]]) + params["window_bounds"] = ",".join([str(p) for p in fancy["window_bounds"]]) if "icon_size" in fancy: params["icon_size"] = str(fancy["icon_size"]) if bg_path is not None: # Set background file, then call SetFile to make it invisible. 
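# SetFile is part of Apple's command line developer tools; "-a V" sets the # file's invisible attribute so the background picture stays hidden when the # volume is browsed in the Finder.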
# (note: making it invisible first makes set background picture fail) - bgscript = Template("""set background picture of theViewOptions to file ".background:$bgpic" - do shell script "SetFile -a V /Volumes/$disk/.background/$bgpic" """) + bgscript = Template( + """set background picture of theViewOptions to file ".background:$bgpic" + do shell script "SetFile -a V /Volumes/$disk/.background/$bgpic" """ + ) params["background_commands"] = bgscript.substitute( - {"bgpic": os.path.basename(bg_path), "disk": params["disk"]}) + {"bgpic": os.path.basename(bg_path), "disk": params["disk"]} + ) s = appscript.substitute(params) if verbose >= 2: print("Running AppleScript:") print(s) - p = subprocess.Popen(['osascript', '-'], stdin=subprocess.PIPE) - p.communicate(input=s.encode('utf-8')) + p = subprocess.Popen(["osascript", "-"], stdin=subprocess.PIPE) + p.communicate(input=s.encode("utf-8")) if p.returncode: print("Error running osascript.") if verbose >= 2: print("+ Finalizing .dmg disk image +") time.sleep(5) try: runHDIUtil( "convert", dmg_name + ".temp", format="UDBZ", o=dmg_name + ".dmg", - ov=True) + ov=True, + ) except subprocess.CalledProcessError as e: sys.exit(e.returncode) os.unlink(dmg_name + ".temp.dmg") # ------------------------------------------------ if verbose >= 2: print("+ Done +") sys.exit(0) diff --git a/contrib/message-capture/message-capture-parser.py b/contrib/message-capture/message-capture-parser.py index 86afbcf0d..e4ea39e18 100755 --- a/contrib/message-capture/message-capture-parser.py +++ b/contrib/message-capture/message-capture-parser.py @@ -1,237 +1,241 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Parse message capture binary files. To be used in conjunction with -capturemessages. """ import argparse import json import os import shutil import sys from io import BytesIO from pathlib import Path from typing import Any, Dict, List, Optional, Union -sys.path.append( - os.path.join( - os.path.dirname(__file__), - '../../test/functional')) +sys.path.append(os.path.join(os.path.dirname(__file__), "../../test/functional")) from test_framework.messages import ser_uint256 # noqa: E402 from test_framework.p2p import MESSAGEMAP # noqa: E402 TIME_SIZE = 8 LENGTH_SIZE = 4 MSGTYPE_SIZE = 12 # The test framework classes store hashes as large ints in many cases. # These are variables of type uint256 in core. # There isn't a way to distinguish between a large int and a large int that is actually a blob of bytes. # As such, they are itemized here. # Any variables with these names that are of type int are actually uint256 variables. # (These can be easily found by looking for calls to deser_uint256, deser_uint256_vector, and uint256_from_str in messages.py) HASH_INTS = [ "blockhash", "block_hash", "hash", "hashMerkleRoot", "hashPrevBlock", "hashstop", "limited_proofid", "prev_header", "sha256", "stop_hash", ] HASH_INT_VECTORS = [ "hashes", "headers", "vHave", "vHash", ] class MessageTypeNotPrintableError(Exception): pass class ProgressBar: def __init__(self, total: float): self.total = total - self.running = 0.
+ self.running = 0.0 def set_progress(self, progress: float): cols = shutil.get_terminal_size()[0] if cols <= 12: return max_blocks = cols - 9 num_blocks = int(max_blocks * progress) - print('\r[ {}{} ] {:3.0f}%' - .format('#' * num_blocks, - ' ' * (max_blocks - num_blocks), - progress * 100), - end='') + print( + "\r[ {}{} ] {:3.0f}%".format( + "#" * num_blocks, " " * (max_blocks - num_blocks), progress * 100 + ), + end="", + ) def update(self, more: float): self.running += more self.set_progress(self.running / self.total) def to_jsonable(obj: Any) -> Any: if hasattr(obj, "__dict__"): return obj.__dict__ elif hasattr(obj, "__slots__"): ret: Dict[str, Any] = {} for slot in obj.__slots__: val = getattr(obj, slot, None) if slot in HASH_INTS and isinstance(val, int): ret[slot] = ser_uint256(val).hex() - elif slot in HASH_INT_VECTORS and isinstance(val, list) and isinstance(val[0], int): + elif ( + slot in HASH_INT_VECTORS + and isinstance(val, list) + and isinstance(val[0], int) + ): ret[slot] = [ser_uint256(a).hex() for a in val] else: ret[slot] = to_jsonable(val) return ret elif isinstance(obj, list): return [to_jsonable(a) for a in obj] elif isinstance(obj, bytes): return obj.hex() else: return obj -def process_file(path: str, messages: List[Any], recv: bool, - progress_bar: Optional[ProgressBar]) -> None: - with open(path, 'rb') as f_in: +def process_file( + path: str, messages: List[Any], recv: bool, progress_bar: Optional[ProgressBar] +) -> None: + with open(path, "rb") as f_in: if progress_bar: bytes_read = 0 while True: if progress_bar: # Update progress bar diff = f_in.tell() - bytes_read - 1 progress_bar.update(diff) bytes_read = f_in.tell() - 1 # Read the Header tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) if not tmp_header_raw: break tmp_header = BytesIO(tmp_header_raw) time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") - msgtype: bytes = tmp_header.read(MSGTYPE_SIZE).split(b'\x00', 1)[0] + msgtype: bytes = tmp_header.read(MSGTYPE_SIZE).split(b"\x00", 1)[0] length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # Start converting the message to a dictionary msg_dict: Dict[str, Union[int, str]] = {} msg_dict["direction"] = "recv" if recv else "sent" msg_dict["time"] = time # "size" is less readable here, but more readable in the output msg_dict["size"] = length msg_ser = BytesIO(f_in.read(length)) # Determine message type if msgtype not in MESSAGEMAP: # Unrecognized message type try: msgtype_tmp = msgtype.decode() if not msgtype_tmp.isprintable(): raise MessageTypeNotPrintableError msg_dict["msgtype"] = msgtype_tmp except (UnicodeDecodeError, MessageTypeNotPrintableError): msg_dict["msgtype"] = "UNREADABLE" msg_dict["body"] = msg_ser.read().hex() msg_dict["error"] = "Unrecognized message type." messages.append(msg_dict) print( f"WARNING - Unrecognized message type {msgtype!r} in {path}", - file=sys.stderr) + file=sys.stderr, + ) continue # Deserialize the message msg = MESSAGEMAP[msgtype]() msg_dict["msgtype"] = msgtype.decode() try: msg.deserialize(msg_ser) except KeyboardInterrupt: raise except Exception: # Unable to deserialize message body msg_ser.seek(0, os.SEEK_SET) msg_dict["body"] = msg_ser.read().hex() msg_dict["error"] = "Unable to deserialize message." 
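# Keep the failed message (with its raw hex body, stored above) in the # output instead of dropping it silently.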
messages.append(msg_dict) print( f"WARNING - Unable to deserialize message in {path}", - file=sys.stderr) + file=sys.stderr, + ) continue # Convert body of message into a jsonable object if length: msg_dict["body"] = to_jsonable(msg) messages.append(msg_dict) if progress_bar: # Update the progress bar to the end of the current file # in case we exited the loop early # Go to end of file f_in.seek(0, os.SEEK_END) diff = f_in.tell() - bytes_read - 1 progress_bar.update(diff) def main(): parser = argparse.ArgumentParser( description=__doc__, - epilog="EXAMPLE \n\t{0} -o out.json /message_capture/**/*.dat".format( - sys.argv[0]), - formatter_class=argparse.RawTextHelpFormatter) + epilog=( + f"EXAMPLE \n\t{sys.argv[0]} -o out.json /message_capture/**/*.dat" + ), + formatter_class=argparse.RawTextHelpFormatter, + ) parser.add_argument( - "capturepaths", - nargs='+', - help="binary message capture files to parse.") + "capturepaths", nargs="+", help="binary message capture files to parse." + ) + parser.add_argument("-o", "--output", help="output file. If unset print to stdout") parser.add_argument( - "-o", "--output", - help="output file. If unset print to stdout") - parser.add_argument( - "-n", "--no-progress-bar", - action='store_true', - help="disable the progress bar. Automatically set if the output is not a terminal") + "-n", + "--no-progress-bar", + action="store_true", + help=( + "disable the progress bar. Automatically set if the output is not a" + " terminal" + ), + ) args = parser.parse_args() - capturepaths = [Path.cwd() / Path(capturepath) - for capturepath in args.capturepaths] + capturepaths = [Path.cwd() / Path(capturepath) for capturepath in args.capturepaths] output = Path.cwd() / Path(args.output) if args.output else False use_progress_bar = (not args.no_progress_bar) and sys.stdout.isatty() messages: List[Any] = [] if use_progress_bar: total_size = sum(capture.stat().st_size for capture in capturepaths) progress_bar = ProgressBar(total_size) else: progress_bar = None for capture in capturepaths: - process_file( - str(capture), - messages, - "recv" in capture.stem, - progress_bar) + process_file(str(capture), messages, "recv" in capture.stem, progress_bar) - messages.sort(key=lambda msg: msg['time']) + messages.sort(key=lambda msg: msg["time"]) if use_progress_bar: progress_bar.set_progress(1) jsonrep = json.dumps(messages) if output: - with open(str(output), 'w+', encoding="utf8") as f_out: + with open(str(output), "w+", encoding="utf8") as f_out: f_out.write(jsonrep) else: print(jsonrep) if __name__ == "__main__": main() diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py index b094e66ec..0eadb7f1c 100755 --- a/contrib/seeds/makeseeds.py +++ b/contrib/seeds/makeseeds.py @@ -1,283 +1,289 @@ #!/usr/bin/env python3 # Copyright (c) 2013-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
# # Generate seeds.txt from Pieter's DNS seeder # import collections import re import sys from typing import Dict, List, Union import dns.resolver NSEEDS = 512 MAX_SEEDS_PER_ASN = { - 'ipv4': 6, - 'ipv6': 10, + "ipv4": 6, + "ipv6": 10, } MIN_BLOCKS = 760000 -PATTERN_IPV4 = re.compile( - r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$") +PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$") PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$") -PATTERN_ONION = re.compile( - r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$") +PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$") # Used to only select nodes with a user agent string compatible with the # eCash network. PATTERN_AGENT = re.compile(r"^(/Bitcoin ABC:0.(26|27).(\d+)\(.+\)/)") def parseline(line: str) -> Union[dict, None]: - """ Parses a line from `seeds_main.txt` into a dictionary of details for that line. + """Parses a line from `seeds_main.txt` into a dictionary of details for that line. Returns `None` if the line could not be parsed. """ sline = line.split() if len(sline) < 11: # line too short to be valid, skip it. return None # The user agent is at the end of the line. It may contain spaces, so we # concatenate. for i in range(12, len(sline)): - sline[11] += ' ' + sline[i] + sline[11] += " " + sline[i] # Remove leftovers del sline[12:] m = PATTERN_IPV4.match(sline[0]) sortkey = None ip = None if m is None: m = PATTERN_IPV6.match(sline[0]) if m is None: m = PATTERN_ONION.match(sline[0]) if m is None: return None else: - net = 'onion' + net = "onion" ipstr = sortkey = m.group(1) port = int(m.group(2)) else: - net = 'ipv6' + net = "ipv6" # Not interested in localhost - if m.group(1) in ['::']: + if m.group(1) in ["::"]: return None ipstr = m.group(1) # XXX parse IPv6 into number, could use name_to_ipv6 from # generate-seeds sortkey = ipstr port = int(m.group(2)) else: # Do IPv4 sanity check ip = 0 for i in range(0, 4): if int(m.group(i + 2)) < 0 or int(m.group(i + 2)) > 255: return None ip = ip + (int(m.group(i + 2)) << (8 * (3 - i))) if ip == 0: return None - net = 'ipv4' + net = "ipv4" sortkey = ip ipstr = m.group(1) port = int(m.group(6)) # Skip bad results. if sline[1] == 0: return None # Extract uptime %. uptime30 = float(sline[7][:-1]) # Extract Unix timestamp of last success. lastsuccess = int(sline[2]) # Extract protocol version. version = int(sline[10]) # Extract user agent. agent = sline[11][1:-1] # Extract service flags. service = int(sline[9], 16) # Extract blocks. blocks = int(sline[8]) # Construct result. return { - 'net': net, - 'ip': ipstr, - 'port': port, - 'ipnum': ip, - 'uptime': uptime30, - 'lastsuccess': lastsuccess, - 'version': version, - 'agent': agent, - 'service': service, - 'blocks': blocks, - 'sortkey': sortkey, + "net": net, + "ip": ipstr, + "port": port, + "ipnum": ip, + "uptime": uptime30, + "lastsuccess": lastsuccess, + "version": version, + "agent": agent, + "service": service, + "blocks": blocks, + "sortkey": sortkey, } def dedup(ips: List[Dict]) -> List[Dict]: - """ Remove duplicates from `ips` where multiple ips share address and port.
""" + """Remove duplicates from `ips` where multiple ips share address and port.""" d = {} for ip in ips: - d[ip['ip'], ip['port']] = ip + d[ip["ip"], ip["port"]] = ip return list(d.values()) def filtermultiport(ips: List[Dict]) -> List[Dict]: - """ Filter out hosts with more nodes per IP""" + """Filter out hosts with more nodes per IP""" hist = collections.defaultdict(list) for ip in ips: - hist[ip['sortkey']].append(ip) + hist[ip["sortkey"]].append(ip) return [value[0] for (key, value) in list(hist.items()) if len(value) == 1] def lookup_asn(net: str, ip: str) -> Union[int, None]: - """ Look up the asn for an `ip` address by querying cymru.com + """Look up the asn for an `ip` address by querying cymru.com on network `net` (e.g. ipv4 or ipv6). Returns in integer ASN or None if it could not be found. """ try: - if net == 'ipv4': + if net == "ipv4": ipaddr = ip - prefix = '.origin' + prefix = ".origin" else: # http://www.team-cymru.com/IP-ASN-mapping.html # 2001:4860:b002:23::68 res = str() # pick the first 4 nibbles - for nb in ip.split(':')[:4]: + for nb in ip.split(":")[:4]: # right padded with '0' for c in nb.zfill(4): # 2001 4860 b002 0023 - res += c + '.' + res += c + "." # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3 - ipaddr = res.rstrip('.') - prefix = '.origin6' - - asn = int([x.to_text() for x in dns.resolver.query('.'.join( - reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com', - 'TXT').response.answer][0].split('\"')[1].split(' ')[0]) + ipaddr = res.rstrip(".") + prefix = ".origin6" + + asn = int( + [ + x.to_text() + for x in dns.resolver.query( + ".".join(reversed(ipaddr.split("."))) + prefix + ".asn.cymru.com", + "TXT", + ).response.answer + ][0] + .split('"')[1] + .split(" ")[0] + ) return asn except Exception: sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n') return None + # Based on Greg Maxwell's seed_filter.py -def filterbyasn(ips: List[Dict], max_per_asn: Dict, - max_per_net: int) -> List[Dict]: - """ Prunes `ips` by +def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]: + """Prunes `ips` by (a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and (b) trimming ips to have at most `max_per_asn` ips from each asn in each net. 
""" # Sift out ips by type - ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']] - ips_onion = [ip for ip in ips if ip['net'] == 'onion'] + ips_ipv46 = [ip for ip in ips if ip["net"] in ["ipv4", "ipv6"]] + ips_onion = [ip for ip in ips if ip["net"] == "onion"] # Filter IPv46 by ASN, and limit to max_per_net per network result = [] net_count: Dict[str, int] = collections.defaultdict(int) asn_count: Dict[int, int] = collections.defaultdict(int) for i, ip in enumerate(ips_ipv46): if i % 10 == 0: # give progress update print( f"{i:6d}/{len(ips_ipv46)} [{100*i/len(ips_ipv46):04.1f}%]\r", file=sys.stderr, - end='', - flush=True) + end="", + flush=True, + ) - if net_count[ip['net']] == max_per_net: + if net_count[ip["net"]] == max_per_net: # do not add this ip as we already too many # ips from this network continue - asn = lookup_asn(ip['net'], ip['ip']) - if asn is None or asn_count[asn] == max_per_asn[ip['net']]: + asn = lookup_asn(ip["net"], ip["ip"]) + if asn is None or asn_count[asn] == max_per_asn[ip["net"]]: # do not add this ip as we already have too many # ips from this ASN on this network continue asn_count[asn] += 1 - net_count[ip['net']] += 1 + net_count[ip["net"]] += 1 result.append(ip) # Add back Onions (up to max_per_net) result.extend(ips_onion[0:max_per_net]) return result def ip_stats(ips: List[Dict]) -> str: - """ Format and return pretty string from `ips`. """ + """Format and return pretty string from `ips`.""" hist: Dict[str, int] = collections.defaultdict(int) for ip in ips: if ip is not None: - hist[ip['net']] += 1 + hist[ip["net"]] += 1 return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d}" def main(): lines = sys.stdin.readlines() ips = [parseline(line) for line in lines] print( - '\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', - file=sys.stderr) - print(f'{ip_stats(ips):s} Initial', file=sys.stderr) + ( + "\x1b[7m IPv4 IPv6 Onion Pass " + " \x1b[0m" + ), + file=sys.stderr, + ) + print(f"{ip_stats(ips):s} Initial", file=sys.stderr) # Skip entries with invalid address. ips = [ip for ip in ips if ip is not None] - print( - f'{ip_stats(ips):s} Skip entries with invalid address', - file=sys.stderr) + print(f"{ip_stats(ips):s} Skip entries with invalid address", file=sys.stderr) # Skip duplicates (in case multiple seeds files were concatenated) ips = dedup(ips) - print(f'{ip_stats(ips):s} After removing duplicates', file=sys.stderr) + print(f"{ip_stats(ips):s} After removing duplicates", file=sys.stderr) # Enforce minimal number of blocks. - ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS] - print( - f'{ip_stats(ips):s} Enforce minimal number of blocks', - file=sys.stderr) + ips = [ip for ip in ips if ip["blocks"] >= MIN_BLOCKS] + print(f"{ip_stats(ips):s} Enforce minimal number of blocks", file=sys.stderr) # Require service bit 1. - ips = [ip for ip in ips if (ip['service'] & 1) == 1] - print(f'{ip_stats(ips):s} Require service bit 1', file=sys.stderr) + ips = [ip for ip in ips if (ip["service"] & 1) == 1] + print(f"{ip_stats(ips):s} Require service bit 1", file=sys.stderr) # Require at least 50% 30-day uptime for clearnet, 10% for onion. req_uptime = { - 'ipv4': 50, - 'ipv6': 50, - 'onion': 10, + "ipv4": 50, + "ipv6": 50, + "onion": 10, } - ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]] - print(f'{ip_stats(ips):s} Require minimum uptime', file=sys.stderr) + ips = [ip for ip in ips if ip["uptime"] > req_uptime[ip["net"]]] + print(f"{ip_stats(ips):s} Require minimum uptime", file=sys.stderr) # Require a known and recent user agent. 
- ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])] - print( - f'{ip_stats(ips):s} Require a known and recent user agent', - file=sys.stderr) + ips = [ip for ip in ips if PATTERN_AGENT.match(ip["agent"])] + print(f"{ip_stats(ips):s} Require a known and recent user agent", file=sys.stderr) # Sort by availability (and use last success as tie breaker) - ips.sort(key=lambda x: - (x['uptime'], x['lastsuccess'], x['ip']), reverse=True) + ips.sort(key=lambda x: (x["uptime"], x["lastsuccess"], x["ip"]), reverse=True) # Filter out hosts with multiple bitcoin ports, these are likely abusive ips = filtermultiport(ips) print( - f'{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports', - file=sys.stderr) + f"{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports", + file=sys.stderr, + ) # Look up ASNs and limit results, both per ASN and globally. ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS) print( - f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', - file=sys.stderr) + f"{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net", + file=sys.stderr, + ) # Sort the results by IP address (for deterministic output). - ips.sort(key=lambda x: (x['net'], x['sortkey'])) + ips.sort(key=lambda x: (x["net"], x["sortkey"])) for ip in ips: - if ip['net'] == 'ipv6': + if ip["net"] == "ipv6": print(f"[{ip['ip']}]:{ip['port']}") else: print(f"{ip['ip']}:{ip['port']}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/contrib/teamcity/build-configurations.py b/contrib/teamcity/build-configurations.py index 14d4484a5..8f3f384d3 100755 --- a/contrib/teamcity/build-configurations.py +++ b/contrib/teamcity/build-configurations.py @@ -1,672 +1,712 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import argparse import asyncio import os import random import shutil import stat import subprocess import sys from pathlib import Path, PurePath from string import Template import yaml from deepmerge import always_merger from teamcity import is_running_under_teamcity # type: ignore from teamcity.messages import TeamcityServiceMessages # Default timeout value in seconds. Should be overridden by the # configuration file. 
DEFAULT_TIMEOUT = 1 * 60 * 60 if sys.version_info < (3, 6): raise SystemError("This script requires python >= 3.6") class BuildConfiguration: def __init__(self, script_root, config_file, build_name=None): self.script_root = script_root self.config_file = config_file self.name = None self.config = {} self.cmake_flags = [] self.build_steps = [] self.build_directory = None self.junit_reports_dir = None self.test_logs_dir = None self.jobs = (os.cpu_count() or 0) + 1 self.project_root = PurePath( subprocess.run( - ['git', 'rev-parse', '--show-toplevel'], + ["git", "rev-parse", "--show-toplevel"], capture_output=True, check=True, - encoding='utf-8', + encoding="utf-8", text=True, ).stdout.strip() ) self.project_commit = subprocess.run( - ['git', 'rev-parse', '--short', 'HEAD'], + ["git", "rev-parse", "--short", "HEAD"], capture_output=True, check=True, - encoding='utf-8', + encoding="utf-8", text=True, ).stdout.strip() if not config_file.is_file(): raise FileNotFoundError( f"The configuration file does not exist {str(config_file)}" ) if build_name is not None: self.load(build_name) def load(self, build_name): self.name = build_name # Read the configuration with open(self.config_file, encoding="utf-8") as f: config = yaml.safe_load(f) # The configuration root should contain a mandatory element "builds", and # it should not be empty. if not config.get("builds", None): raise AssertionError( - "Invalid configuration file {}: the \"builds\" element is missing or empty".format( - str(self.config_file) - ) + 'Invalid configuration file {}: the "builds" element is missing or' + " empty".format(str(self.config_file)) ) # Check the target build has an entry in the configuration file build = config["builds"].get(self.name, None) if not build: raise AssertionError( "{} is not a valid build identifier. Valid identifiers are {}".format( self.name, list(config.keys()) ) ) # Get a list of the templates, if any templates = config.get("templates", {}) # If the build references some templates, merge all the configurations. # The merge is applied in the same order as the templates are declared # in the template list. 
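# With deepmerge's always_merger, later templates override earlier ones on # conflicting keys, and the build's own settings (merged last) win over all # templates.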
template_config = {} template_names = build.get("templates", []) for template_name in template_names: # Raise an error if the template does not exist if template_name not in templates: raise AssertionError( - "Build {} configuration inherits from template {}, but the template does not exist.".format( - self.name, - template_name - ) + "Build {} configuration inherits from template {}, but the template" + " does not exist.".format(self.name, template_name) ) always_merger.merge(template_config, templates.get(template_name)) self.config = always_merger.merge(template_config, build) # Create the build directory as needed self.build_directory = Path( - self.project_root.joinpath( - 'abc-ci-builds', - self.name)) + self.project_root.joinpath("abc-ci-builds", self.name) + ) # Define the junit and logs directories self.junit_reports_dir = self.build_directory.joinpath("test/junit") self.test_logs_dir = self.build_directory.joinpath("test/log") self.functional_test_logs = self.build_directory.joinpath( - "test/tmp/test_runner_*") + "test/tmp/test_runner_*" + ) # We will provide the required environment variables self.environment_variables = { "BUILD_DIR": str(self.build_directory), "CMAKE_PLATFORMS_DIR": self.project_root.joinpath("cmake", "platforms"), "THREADS": str(self.jobs), "TOPLEVEL": str(self.project_root), } def create_script_file(self, dest, content): # Write the content to a script file using a template - with open(self.script_root.joinpath("bash_script.sh.in"), encoding='utf-8') as f: + with open( + self.script_root.joinpath("bash_script.sh.in"), encoding="utf-8" + ) as f: script_template_content = f.read() template = Template(script_template_content) - with open(dest, 'w', encoding='utf-8') as f: + with open(dest, "w", encoding="utf-8") as f: f.write( template.safe_substitute( **self.environment_variables, SCRIPT_CONTENT=content, ) ) dest.chmod(dest.stat().st_mode | stat.S_IEXEC) def create_build_steps(self, artifact_dir, preview_url, ip_address): # There are 3 possibilities to define the build steps: # - By manually defining a script to run. # - By specifying a docker configuration to build # - By defining the configuration options and a list of target groups to # run. The configuration step should be run once then all the targets # groups. Each target group can contain 1 or more targets which # should be run parallel. script = self.config.get("script", None) if script: script_file = self.build_directory.joinpath("script.sh") self.create_script_file(script_file, script) self.build_steps = [ { "bin": str(script_file), "args": [], } ] return # Check for a docker configuration docker_config = self.config.get("docker", None) if docker_config: # Make sure we have at least a context context = docker_config.get("context", None) if context is None: raise AssertionError( - f"The docker configuration for build {self.name} is missing a context, aborting" + f"The docker configuration for build {self.name} is missing a" + " context, aborting" ) # Make the context path absolute context = self.project_root.joinpath(context) # Make sure the context is a subdir of the git repository. This # prevents e.g. the use of .. as a context path. 
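# Path.resolve() canonicalizes the path (following symlinks) before the # parents containment check below, so symlinked escapes are caught as well.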
if Path(self.project_root) not in Path(context).resolve().parents: raise AssertionError( "The docker context should be a subdirectory of the project root" ) dockerfile = docker_config.get("dockerfile", None) - dockerfile_args = [ - "-f", str(self.project_root.joinpath(dockerfile))] if dockerfile else [] + dockerfile_args = ( + ["-f", str(self.project_root.joinpath(dockerfile))] + if dockerfile + else [] + ) tag_name = "-".join([self.name, self.project_commit]) # Docker build self.build_steps.append( { "bin": "docker", - "args": ["build"] + dockerfile_args + ["-t", tag_name, str(context)], + "args": ( + ["build"] + dockerfile_args + ["-t", tag_name, str(context)] + ), } ) inner_port = docker_config.get("port", 80) outer_port = random.randrange(41000, 42000) port_args = ["-p", f"{outer_port}:{inner_port}"] # Docker run. This uses a timeout value to stop the container after # some time. The stop signal is set to SIGTERM so the app has a # chance to gracefully handle the stop request, and defaults to a # less subtle SIGKILL if it didn't abort after a minute. self.build_steps.append( { "bin": "docker", - "args": ["run", "--rm", "-d", "--name", tag_name, "--stop-signal", "SIGTERM", "--stop-timeout", "60"] + port_args + [tag_name], + "args": ( + [ + "run", + "--rm", + "-d", + "--name", + tag_name, + "--stop-signal", + "SIGTERM", + "--stop-timeout", + "60", + ] + + port_args + + [tag_name] + ), } ) timeout_minutes = docker_config.get("timeout_minutes", 60) # Write the address to stdout and to the preview_url log file - preview_msg = f"Preview is available at http://{ip_address}:{outer_port} for the next {timeout_minutes} minutes." - with open(preview_url, 'w', encoding='utf-8') as f: + preview_msg = ( + f"Preview is available at http://{ip_address}:{outer_port} for the next" + f" {timeout_minutes} minutes." + ) + with open(preview_url, "w", encoding="utf-8") as f: f.write(preview_msg) self.build_steps.append( { "bin": "echo", "args": [preview_msg], } ) # Now we need to schedule a job to stop or kill the container after # the timeout expires. We achieve this via the `at` command, but # there is a catch: `at` memorizes the current working directory and # will execute the scheduled command from there. This is a problem # for our build as the build directory might have been deleted by # the time the timeout expires and the command would fail. To # prevent this issue we are changing the working directory before # at is called to something that we are sure will not get deleted: # the user home directory. # # Note for developers: debugging the `at` command failures can be # tricky, but the `at` command will send an email with the command # stdout/stderr upon failure (not upon success by default), so the # issues can be tracked by looking at the /var/mail/ file. script_file = self.build_directory.joinpath("docker_timeout.sh") self.create_script_file( script_file, - f'cd "${{HOME}}" && echo "docker stop {tag_name}" | at now +{timeout_minutes} minutes') + ( + f'cd "${{HOME}}" && echo "docker stop {tag_name}" | at now' + f" +{timeout_minutes} minutes" + ), + ) self.build_steps.append( { "bin": str(script_file), "args": [], } ) return # Get the cmake configuration definitions. self.cmake_flags = self.config.get("cmake_flags", []) self.cmake_flags.append(f"-DCMAKE_INSTALL_PREFIX={str(artifact_dir)}") # Get the targets to build. If none is provided then raise an error.
targets = self.config.get("targets", None) if not targets: raise AssertionError( - "No build target has been provided for build {} and no script is defined, aborting".format( - self.name - ) + "No build target has been provided for build {} and no script is" + " defined, aborting".format(self.name) ) # Some more flags for the build_cmake.sh script if self.config.get("clang", False): - self.cmake_flags.extend([ - "-DCMAKE_C_COMPILER=clang", - "-DCMAKE_CXX_COMPILER=clang++", - ]) + self.cmake_flags.extend( + [ + "-DCMAKE_C_COMPILER=clang", + "-DCMAKE_CXX_COMPILER=clang++", + ] + ) if self.config.get("gcc", False): - self.cmake_flags.extend([ - "-DCMAKE_C_COMPILER=gcc", - "-DCMAKE_CXX_COMPILER=g++", - ]) + self.cmake_flags.extend( + [ + "-DCMAKE_C_COMPILER=gcc", + "-DCMAKE_CXX_COMPILER=g++", + ] + ) if self.config.get("junit", True): - self.cmake_flags.extend([ - "-DENABLE_JUNIT_REPORT=ON", - ]) + self.cmake_flags.extend( + [ + "-DENABLE_JUNIT_REPORT=ON", + ] + ) if self.config.get("Werror", False): - self.cmake_flags.extend([ - "-DCMAKE_C_FLAGS=-Werror", - "-DCMAKE_CXX_FLAGS=-Werror", - ]) + self.cmake_flags.extend( + [ + "-DCMAKE_C_FLAGS=-Werror", + "-DCMAKE_CXX_FLAGS=-Werror", + ] + ) # Get the generator, default to ninja generator = self.config.get("generator", {}) generator_name = generator.get("name", "Ninja") generator_command = generator.get("command", "ninja") # If the build runs on diff or has the fail_fast flag, exit on first error. # Otherwise keep running so we can gather more test results. - fail_fast = self.config.get( - "fail_fast", False) or self.config.get( - "runOnDiff", False) - generator_flags = generator.get( - "flags", ["-k0"] if not fail_fast else []) + fail_fast = self.config.get("fail_fast", False) or self.config.get( + "runOnDiff", False + ) + generator_flags = generator.get("flags", ["-k0"] if not fail_fast else []) # Max out the jobs by default when the generator uses make if generator_command == "make": generator_flags.append(f"-j{self.jobs}") # Handle cross build configuration cross_build = self.config.get("cross_build", None) if cross_build: static_depends = cross_build.get("static_depends", None) toolchain = cross_build.get("toolchain", None) emulator = cross_build.get("emulator", None) # Both static_depends and toolchain are mandatory for cross builds if not static_depends: raise AssertionError( - "`static_depends` configuration is required for cross builds") + "`static_depends` configuration is required for cross builds" + ) if not toolchain: raise AssertionError( - "`toolchain` configuration is required for cross builds") + "`toolchain` configuration is required for cross builds" + ) self.build_steps.append( { - "bin": str(self.project_root.joinpath("contrib/devtools/build_depends.sh")), + "bin": str( + self.project_root.joinpath("contrib/devtools/build_depends.sh") + ), "args": [static_depends], } ) toolchain_file = self.project_root.joinpath( f"cmake/platforms/{toolchain}.cmake" ) - self.cmake_flags.append( - f"-DCMAKE_TOOLCHAIN_FILE={str(toolchain_file)}" - ) + self.cmake_flags.append(f"-DCMAKE_TOOLCHAIN_FILE={str(toolchain_file)}") if emulator: self.cmake_flags.append( f"-DCMAKE_CROSSCOMPILING_EMULATOR={shutil.which(emulator)}" ) # Configure using cmake.
self.build_steps.append( { "bin": "cmake", - "args": ["-G", generator_name, str(self.project_root)] + self.cmake_flags, + "args": [ + "-G", + generator_name, + str(self.project_root), + ] + self.cmake_flags, } ) for target_group in targets: self.build_steps.append( { "bin": generator_command, "args": generator_flags + target_group, } ) # If a post build script is defined, add it as a last step post_build = self.config.get("post_build", None) if post_build: script_file = self.build_directory.joinpath("post_build.sh") self.create_script_file(script_file, post_build) self.build_steps.append( { "bin": str(script_file), "args": [], } ) def get(self, key, default): return self.config.get(key, default) -class UserBuild(): +class UserBuild: def __init__(self, configuration): self.configuration = configuration build_directory = self.configuration.build_directory self.artifact_dir = build_directory.joinpath("artifacts") # Build 2 log files: # - the full log will contain all unfiltered content # - the clean log will contain the same filtered content as what is # printed to stdout. This filter is done in print_line_to_logs(). self.logs = {} - self.logs["clean_log"] = build_directory.joinpath( - "build.clean.log") + self.logs["clean_log"] = build_directory.joinpath("build.clean.log") self.logs["full_log"] = build_directory.joinpath("build.full.log") # Clean the build directory before any build step is run. if self.configuration.build_directory.is_dir(): shutil.rmtree(self.configuration.build_directory) self.configuration.build_directory.mkdir(exist_ok=True, parents=True) self.preview_url = build_directory.joinpath("preview_url.log") - self.ip_address = '127.0.0.1' + self.ip_address = "127.0.0.1" def copy_artifacts(self, artifacts): # Make sure the artifact directory always exists. It is created before # the build is run (to let the build install things to it) but since we # have no control over what is being executed, it might very well be # deleted by the build as well. This can happen when the artifacts # are located in the build directory and the build calls git clean. self.artifact_dir.mkdir(exist_ok=True) # Find and copy artifacts. # The source is relative to the build tree, the destination relative to # the artifact directory. # The artifact directory is located in the build directory tree, so results # from it need to be excluded from the glob matches to prevent infinite # recursion. for pattern, dest in artifacts.items(): - matches = [m for m in sorted(self.configuration.build_directory.glob( - pattern)) if self.artifact_dir not in m.parents and self.artifact_dir != m] + matches = [ + m + for m in sorted(self.configuration.build_directory.glob(pattern)) + if self.artifact_dir not in m.parents and self.artifact_dir != m + ] dest = self.artifact_dir.joinpath(dest) # Pattern did not match if not matches: continue # If there is a single file, destination is the new file path if len(matches) == 1 and matches[0].is_file(): # Create the parent directories as needed dest.parent.mkdir(parents=True, exist_ok=True) shutil.copy2(matches[0], dest) continue # If there are multiple files or a single directory, destination is a # directory. dest.mkdir(parents=True, exist_ok=True) for match in matches: if match.is_file(): shutil.copy2(match, dest) else: # FIXME after python >= 3.8 is enforced, avoid the # try/except block and use dirs_exist_ok=True instead.
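# (shutil.copytree() raises FileExistsError when the destination directory # already exists; dirs_exist_ok=True, added in Python 3.8, merges instead.)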
try: shutil.copytree(match, dest.joinpath(match.name)) except FileExistsError: pass def print_line_to_logs(self, line): # Always print to the full log - with open(self.logs["full_log"], 'a', encoding='utf-8') as log: + with open(self.logs["full_log"], "a", encoding="utf-8") as log: log.write(line) # Discard the set -x bash output for stdout and the clean log if not line.startswith("+"): - with open(self.logs["clean_log"], 'a', encoding='utf-8') as log: + with open(self.logs["clean_log"], "a", encoding="utf-8") as log: log.write(line) print(line.rstrip()) async def process_stdout(self, stdout): while True: try: line = await stdout.readline() - line = line.decode('utf-8') + line = line.decode("utf-8") if not line: break self.print_line_to_logs(line) except ValueError: self.print_line_to_logs( "--- Line discarded due to StreamReader overflow ---" ) continue def run_process(self, binary, args=None): args = args if args is not None else [] return asyncio.create_subprocess_exec( *([binary] + args), # Buffer limit is 64KB by default, but we need a larger buffer: limit=1024 * 256, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.STDOUT, cwd=self.configuration.build_directory, env={ **os.environ, **self.configuration.environment_variables, **self.configuration.get("env", {}), "ARTIFACT_DIR": str(self.artifact_dir), "CMAKE_FLAGS": " ".join(self.configuration.cmake_flags), }, ) async def run_build(self, binary, args=None): args = args if args is not None else [] proc = await self.run_process(binary, args) logging_task = asyncio.ensure_future(self.process_stdout(proc.stdout)) # Block until the process is finished result = await proc.wait() # Wait up to a few seconds for logging to flush. Normally, this will # finish immediately. try: await asyncio.wait_for(logging_task, timeout=5) except asyncio.TimeoutError: self.print_line_to_logs( - "Warning: Timed out while waiting for logging to flush. Some log lines may be missing.") + "Warning: Timed out while waiting for logging to flush. Some log lines" + " may be missing." 
+ ) return result async def wait_for_build(self, timeout, args=None): args = args if args is not None else [] message = f"Build {self.configuration.name} completed successfully" try: for step in self.configuration.build_steps: - return_code = await asyncio.wait_for(self.run_build(step["bin"], step["args"]), timeout) + return_code = await asyncio.wait_for( + self.run_build(step["bin"], step["args"]), timeout + ) if return_code != 0: message = "Build {} failed with exit code {}".format( - self.configuration.name, - return_code + self.configuration.name, return_code ) return except asyncio.TimeoutError: message = "Build {} timed out after {:.1f}s".format( self.configuration.name, round(timeout, 1) ) # The process is killed, set return code to 128 + 9 (SIGKILL) = 137 return_code = 137 finally: self.print_line_to_logs(message) build_directory = self.configuration.build_directory # Always add the build logs to the root of the artifacts artifacts = { **self.configuration.get("artifacts", {}), str(self.logs["full_log"].relative_to(build_directory)): "", str(self.logs["clean_log"].relative_to(build_directory)): "", - str(self.configuration.junit_reports_dir.relative_to(build_directory)): "", + str( + self.configuration.junit_reports_dir.relative_to(build_directory) + ): "", str(self.configuration.test_logs_dir.relative_to(build_directory)): "", - str(self.configuration.functional_test_logs.relative_to(build_directory)): "functional", + str( + self.configuration.functional_test_logs.relative_to(build_directory) + ): "functional", str(self.preview_url.relative_to(build_directory)): "", } self.copy_artifacts(artifacts) return (return_code, message) def run(self, args=None): args = args if args is not None else [] if self.artifact_dir.is_dir(): shutil.rmtree(self.artifact_dir) self.artifact_dir.mkdir(exist_ok=True) self.configuration.create_build_steps( - self.artifact_dir, self.preview_url, self.ip_address) + self.artifact_dir, self.preview_url, self.ip_address + ) return_code, message = asyncio.run( - self.wait_for_build( - self.configuration.get( - "timeout", DEFAULT_TIMEOUT)) + self.wait_for_build(self.configuration.get("timeout", DEFAULT_TIMEOUT)) ) return (return_code, message) class TeamcityBuild(UserBuild): def __init__(self, configuration): super().__init__(configuration) # This accounts for the volume mapping from the container. # Our local /results is mapped to some relative ./results on the host, # so we use /results/artifacts to copy our files but results/artifacts as # an artifact path for teamcity. # TODO abstract out the volume mapping self.artifact_dir = Path("/results/artifacts") self.teamcity_messages = TeamcityServiceMessages() # Only gather the public IP if we are running on a TC build agent from whatismyip import whatismyip + self.ip_address = whatismyip() def copy_artifacts(self, artifacts): super().copy_artifacts(artifacts) # Start loading the junit reports. junit_reports_pattern = f"{str(self.artifact_dir.relative_to('/'))}/junit/*.xml" self.teamcity_messages.importData("junit", junit_reports_pattern) # Instruct teamcity to upload our artifact directory artifact_path_pattern = "+:{}=>artifacts.tar.gz".format( str(self.artifact_dir.relative_to("/")) ) self.teamcity_messages.publishArtifacts(artifact_path_pattern) def run(self, args=None): args = args if args is not None else [] # Let the user know what build is being run. # This makes it easier to retrieve the info from the logs. 
self.teamcity_messages.customMessage( - f"Starting build {self.configuration.name}", - status="NORMAL" + f"Starting build {self.configuration.name}", status="NORMAL" ) return_code, message = super().run() # Since we are aborting the build, make sure to flush everything first os.sync() if return_code != 0: # Add a build problem to the report self.teamcity_messages.buildProblem( message, # Let Teamcity calculate an ID from our message - None + None, ) # Change the final build message self.teamcity_messages.buildStatus( # Don't change the status, let Teamcity set it to failure None, - message + message, ) else: # Change the final build message but keep the original one as well self.teamcity_messages.buildStatus( # Don't change the status, let Teamcity set it to success None, - f"{message} ({{build.status.text}})" + f"{message} ({{build.status.text}})", ) return (return_code, message) def main(): script_dir = PurePath(os.path.realpath(__file__)).parent # By default search for a configuration file in the same directory as this # script. - default_config_path = Path( - script_dir.joinpath("build-configurations.yml") - ) + default_config_path = Path(script_dir.joinpath("build-configurations.yml")) parser = argparse.ArgumentParser(description="Run a CI build") - parser.add_argument( - "build", - help="The name of the build to run" - ) + parser.add_argument("build", help="The name of the build to run") parser.add_argument( "--config", "-c", help="Path to the builds configuration file (defaults to {})".format( str(default_config_path) - ) + ), ) args, unknown_args = parser.parse_known_args() # Check the configuration file exists config_path = Path(args.config) if args.config else default_config_path - build_configuration = BuildConfiguration( - script_dir, config_path, args.build) + build_configuration = BuildConfiguration(script_dir, config_path, args.build) if is_running_under_teamcity(): build = TeamcityBuild(build_configuration) else: build = UserBuild(build_configuration) sys.exit(build.run(unknown_args)[0]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/contrib/teamcity/nanobench_json_to_teamcity_messages.py b/contrib/teamcity/nanobench_json_to_teamcity_messages.py index cb6ade80e..033a53418 100755 --- a/contrib/teamcity/nanobench_json_to_teamcity_messages.py +++ b/contrib/teamcity/nanobench_json_to_teamcity_messages.py @@ -1,85 +1,78 @@ #!/usr/bin/env python3 # Copyright (c) 2021 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import json import sys from datetime import timedelta from teamcity.messages import TeamcityServiceMessages if len(sys.argv) != 3: - print( - f""" + print(f""" Usage: {sys.argv[0]} <suite_name> <json_file> Print the teamcity service messages for associating op/s benchmark results with the tests.
Requires the teamcity-messages python library: pip3 install teamcity-messages """) sys.exit(1) suite_name = sys.argv[1] -with open(sys.argv[2], encoding='utf-8') as f: +with open(sys.argv[2], encoding="utf-8") as f: json_results = json.load(f) teamcity_messages = TeamcityServiceMessages() -teamcity_messages.testSuiteStarted( - suite_name -) +teamcity_messages.testSuiteStarted(suite_name) def testMetadata_number_message(test_name, param_name, param_value): teamcity_messages.message( - 'testMetadata', - type='number', + "testMetadata", + type="number", testName=test_name, name=param_name, - value=f'{param_value:.2f}', + value=f"{param_value:.2f}", ) -for result in json_results.get('results', []): - test_name = result['name'] +for result in json_results.get("results", []): + test_name = result["name"] - teamcity_messages.testStarted( - test_name - ) + teamcity_messages.testStarted(test_name) testMetadata_number_message( test_name, f"ns/{result['unit']}", - 1e9 * result['median(elapsed)'] / result['batch'], + 1e9 * result["median(elapsed)"] / result["batch"], ) testMetadata_number_message( test_name, f"{result['unit']}/s", - result['batch'] / result['median(elapsed)'], + result["batch"] / result["median(elapsed)"], ) testMetadata_number_message( test_name, - 'err%', - 100 * result['medianAbsolutePercentError(elapsed)'], + "err%", + 100 * result["medianAbsolutePercentError(elapsed)"], ) testMetadata_number_message( test_name, f"ins/{result['unit']}", - result['median(instructions)'] / result['batch'], + result["median(instructions)"] / result["batch"], ) teamcity_messages.testFinished( test_name, - testDuration=timedelta(seconds=result['totalTime']), + testDuration=timedelta(seconds=result["totalTime"]), ) -teamcity_messages.testSuiteFinished( - suite_name -) +teamcity_messages.testSuiteFinished(suite_name) diff --git a/contrib/testgen/base58.py b/contrib/testgen/base58.py index 07cbcadcf..ddcb96093 100644 --- a/contrib/testgen/base58.py +++ b/contrib/testgen/base58.py @@ -1,127 +1,126 @@ #!/usr/bin/env python3 # Copyright (c) 2012-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -''' +""" Bitcoin base58 encoding and decoding. Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain) -''' +""" import hashlib # for compatibility with following code... class SHA256: new = hashlib.sha256 if str != bytes: # Python 3.x def ord(c): # noqa: A001 return c def chr(n): # noqa: A001 return bytes((n,)) -__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' + +__b58chars = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" __b58base = len(__b58chars) b58chars = __b58chars def b58encode(v): - """ encode v, which is a string of bytes, to base58. 
- """ + """encode v, which is a string of bytes, to base58.""" long_value = 0 - for (i, c) in enumerate(v[::-1]): + for i, c in enumerate(v[::-1]): if isinstance(c, str): c = ord(c) long_value += (256**i) * c - result = '' + result = "" while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result long_value = div result = __b58chars[long_value] + result # Bitcoin does a little leading-zero-compression: # leading 0-bytes in the input become leading-1s nPad = 0 for c in v: if c == 0: nPad += 1 else: break return (__b58chars[0] * nPad) + result def b58decode(v, length=None): - """ decode v into a string of len bytes - """ + """decode v into a string of len bytes""" long_value = 0 for i, c in enumerate(v[::-1]): pos = __b58chars.find(c) assert pos != -1 long_value += pos * (__b58base**i) result = bytes() while long_value >= 256: div, mod = divmod(long_value, 256) result = chr(mod) + result long_value = div result = chr(long_value) + result nPad = 0 for c in v: if c == __b58chars[0]: nPad += 1 continue break result = bytes(nPad) + result if length is not None and len(result) != length: return None return result def checksum(v): """Return 32-bit checksum based on SHA256""" return SHA256.new(SHA256.new(v).digest()).digest()[0:4] def b58encode_chk(v): """b58encode a string, with 32-bit checksum""" return b58encode(v + checksum(v)) def b58decode_chk(v): """decode a base58 string, check and remove checksum""" result = b58decode(v) if result is None: return None if result[-4:] == checksum(result[:-4]): return result[:-4] else: return None def get_bcaddress_version(strAddress): - """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """ + """Returns None if strAddress is invalid. Otherwise returns integer version of address.""" addr = b58decode_chk(strAddress) if addr is None or len(addr) != 21: return None version = addr[0] return ord(version) -if __name__ == '__main__': +if __name__ == "__main__": # Test case (from http://gitorious.org/bitcoin/python-base58.git) - assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0 - _ohai = 'o hai'.encode('ascii') + assert get_bcaddress_version("15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC") == 0 + _ohai = "o hai".encode("ascii") _tmp = b58encode(_ohai) - assert _tmp == 'DYB3oMS' + assert _tmp == "DYB3oMS" assert b58decode(_tmp, 5) == _ohai print("Tests passed") diff --git a/contrib/testgen/gen_base58_test_vectors.py b/contrib/testgen/gen_base58_test_vectors.py index 29b69bf79..c5252072d 100755 --- a/contrib/testgen/gen_base58_test_vectors.py +++ b/contrib/testgen/gen_base58_test_vectors.py @@ -1,141 +1,145 @@ #!/usr/bin/env python3 # Copyright (c) 2012-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -''' +""" Generate valid and invalid base58 address and private key test vectors. Usage: gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json -''' +""" # 2012 Wladimir J. 
van der Laan # Released under MIT License import os import random from binascii import b2a_hex from itertools import islice from base58 import b58chars, b58decode_chk, b58encode_chk # key types PUBKEY_ADDRESS = 0 SCRIPT_ADDRESS = 5 PUBKEY_ADDRESS_TEST = 111 SCRIPT_ADDRESS_TEST = 196 PRIVKEY = 128 PRIVKEY_TEST = 239 -metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed'] +metadata_keys = ["isPrivkey", "isTestnet", "addrType", "isCompressed"] # templates for valid sequences templates = [ # prefix, payload_size, suffix, metadata # None = N/A - ((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)), - ((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)), - ((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)), - ((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)), + ((PUBKEY_ADDRESS,), 20, (), (False, False, "pubkey", None)), + ((SCRIPT_ADDRESS,), 20, (), (False, False, "script", None)), + ((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, "pubkey", None)), + ((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, "script", None)), ((PRIVKEY,), 32, (), (True, False, None, False)), ((PRIVKEY,), 32, (1,), (True, False, None, True)), ((PRIVKEY_TEST,), 32, (), (True, True, None, False)), - ((PRIVKEY_TEST,), 32, (1,), (True, True, None, True)) + ((PRIVKEY_TEST,), 32, (1,), (True, True, None, True)), ] def is_valid(v): - '''Check vector v for validity''' + """Check vector v for validity""" result = b58decode_chk(v) if result is None: return False for template in templates: prefix = bytearray(template[0]) suffix = bytearray(template[2]) if result.startswith(prefix) and result.endswith(suffix): if (len(result) - len(prefix) - len(suffix)) == template[1]: return True return False def gen_valid_vectors(): - '''Generate valid test vectors''' + """Generate valid test vectors""" while True: for template in templates: prefix = bytearray(template[0]) payload = bytearray(os.urandom(template[1])) suffix = bytearray(template[2]) rv = b58encode_chk(prefix + payload + suffix) assert is_valid(rv) - metadata = {x: y for x, y in zip( - metadata_keys, template[3]) if y is not None} + metadata = { + x: y for x, y in zip(metadata_keys, template[3]) if y is not None + } hexrepr = b2a_hex(payload) if isinstance(hexrepr, bytes): - hexrepr = hexrepr.decode('utf8') + hexrepr = hexrepr.decode("utf8") yield (rv, hexrepr, metadata) -def gen_invalid_vector(template, corrupt_prefix, - randomize_payload_size, corrupt_suffix): - '''Generate possibly invalid vector''' +def gen_invalid_vector( + template, corrupt_prefix, randomize_payload_size, corrupt_suffix +): + """Generate possibly invalid vector""" if corrupt_prefix: prefix = os.urandom(1) else: prefix = bytearray(template[0]) if randomize_payload_size: payload = os.urandom(max(int(random.expovariate(0.5)), 50)) else: payload = os.urandom(template[1]) if corrupt_suffix: suffix = os.urandom(len(template[2])) else: suffix = bytearray(template[2]) return b58encode_chk(prefix + payload + suffix) def randbool(p=0.5): - '''Return True with P(p)''' + """Return True with P(p)""" return random.random() < p def gen_invalid_vectors(): - '''Generate invalid test vectors''' + """Generate invalid test vectors""" # start with some manual edge-cases yield "", yield "x", while True: # kinds of invalid vectors: # invalid prefix # invalid payload length # invalid (randomized) suffix (add random data) # corrupt checksum for template in templates: - val = gen_invalid_vector(template, randbool( - 0.2), randbool(0.2), randbool(0.2)) + val = gen_invalid_vector( + 
template, randbool(0.2), randbool(0.2), randbool(0.2) + ) if random.randint(0, 10) < 1: # line corruption if randbool(): # add random character to end val += random.choice(b58chars) else: # replace random character in the middle n = random.randint(0, len(val)) - val = val[0:n] + random.choice(b58chars) + val[n + 1:] + val = val[0:n] + random.choice(b58chars) + val[n + 1 :] if not is_valid(val): yield val, -if __name__ == '__main__': +if __name__ == "__main__": import json import sys - iters = {'valid': gen_valid_vectors, 'invalid': gen_invalid_vectors} + + iters = {"valid": gen_valid_vectors, "invalid": gen_invalid_vectors} try: uiter = iters[sys.argv[1]] except IndexError: uiter = gen_valid_vectors try: count = int(sys.argv[2]) except IndexError: count = 0 data = list(islice(uiter(), count)) json.dump(data, sys.stdout, sort_keys=True, indent=4) - sys.stdout.write('\n') + sys.stdout.write("\n") diff --git a/contrib/tracing/log_raw_p2p_msgs.py b/contrib/tracing/log_raw_p2p_msgs.py index eb005a71a..2994641ee 100755 --- a/contrib/tracing/log_raw_p2p_msgs.py +++ b/contrib/tracing/log_raw_p2p_msgs.py @@ -1,185 +1,190 @@ #!/usr/bin/env python3 """ Demonstration of eBPF limitations and the effect on USDT with the net:inbound_message and net:outbound_message tracepoints. """ # This script shows a limitation of eBPF when data larger than 32kb is passed to # user-space. It uses BCC (https://github.com/iovisor/bcc) to load a sandboxed # eBPF program into the Linux kernel (root privileges are required). The eBPF # program attaches to two statically defined tracepoints. The tracepoint # 'net:inbound_message' is called when a new P2P message is received, and # 'net:outbound_message' is called on outbound P2P messages. The eBPF program # submits the P2P messages to this script via a BPF ring buffer. The submitted # messages are printed. # eBPF Limitations: # # Bitcoin P2P messages can be larger than 32kb (e.g. tx, block, ...). The eBPF # VM's stack is limited to 512 bytes, and we can't allocate more than about 32kb # for a P2P message in the eBPF VM. The message data is cut off when the message # is larger than MAX_MSG_DATA_LENGTH (see definition below). This can be detected # in user-space by comparing the data length to the message length variable. The # message is cut off when the data length is smaller than the message length. # A warning is included with the printed message data. # # Data is submitted to user-space (i.e. to this script) via a ring buffer. The # throughput of the ring buffer is limited. Each p2p_message is about 32kb in # size. In- or outbound messages submitted to the ring buffer in rapid # succession fill the ring buffer faster than it can be read. Some messages are # lost. # # BCC prints: "Possibly lost 2 samples" on lost messages. import sys from bcc import BPF, USDT # BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into # a sandboxed Linux kernel VM. program = """ #include <uapi/linux/ptrace.h> #define MIN(a,b) ({ __typeof__ (a) _a = (a); __typeof__ (b) _b = (b); _a < _b ? _a : _b; }) // Maximum possible allocation size // from include/linux/percpu.h in the Linux kernel #define PCPU_MIN_UNIT_SIZE (32 << 10) // Tor v3 addresses are 62 chars + 6 chars for the port (':12345').
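// So a peer address needs at most 62 + 6 = 68 bytes, and the message payload
// below gets PCPU_MIN_UNIT_SIZE - 200 = 32768 - 200 = 32568 bytes; anything
// longer arrives truncated, as described in the comments above.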
#define MAX_PEER_ADDR_LENGTH 62 + 6 #define MAX_PEER_CONN_TYPE_LENGTH 20 #define MAX_MSG_TYPE_LENGTH 20 #define MAX_MSG_DATA_LENGTH PCPU_MIN_UNIT_SIZE - 200 struct p2p_message { u64 peer_id; char peer_addr[MAX_PEER_ADDR_LENGTH]; char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH]; char msg_type[MAX_MSG_TYPE_LENGTH]; u64 msg_size; u8 msg[MAX_MSG_DATA_LENGTH]; }; // We can't store the p2p_message struct on the eBPF stack as it is limited to // 512 bytes and P2P messages can be bigger than 512 bytes. However, we can use // a BPF-array with a length of 1 to allocate up to 32768 bytes (this is // defined by PCPU_MIN_UNIT_SIZE in include/linux/percpu.h in the Linux kernel). // Also see https://github.com/iovisor/bcc/issues/2306 BPF_ARRAY(msg_arr, struct p2p_message, 1); // Two BPF perf buffers for pushing data (here P2P messages) to user-space. BPF_PERF_OUTPUT(inbound_messages); BPF_PERF_OUTPUT(outbound_messages); int trace_inbound_message(struct pt_regs *ctx) { int idx = 0; struct p2p_message *msg = msg_arr.lookup(&idx); // lookup() does not return a NULL pointer. However, the BPF verifier // requires an explicit check that the `msg` pointer isn't a NULL // pointer. See https://github.com/iovisor/bcc/issues/2595 if (msg == NULL) return 1; bpf_usdt_readarg(1, ctx, &msg->peer_id); bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH); bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH); bpf_usdt_readarg(5, ctx, &msg->msg_size); bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH)); inbound_messages.perf_submit(ctx, msg, sizeof(*msg)); return 0; }; int trace_outbound_message(struct pt_regs *ctx) { int idx = 0; struct p2p_message *msg = msg_arr.lookup(&idx); // lookup() does not return a NULL pointer. However, the BPF verifier // requires an explicit check that the `msg` pointer isn't a NULL // pointer.
See https://github.com/iovisor/bcc/issues/2595 if (msg == NULL) return 1; bpf_usdt_readarg(1, ctx, &msg->peer_id); bpf_usdt_readarg_p(2, ctx, &msg->peer_addr, MAX_PEER_ADDR_LENGTH); bpf_usdt_readarg_p(3, ctx, &msg->peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); bpf_usdt_readarg_p(4, ctx, &msg->msg_type, MAX_MSG_TYPE_LENGTH); bpf_usdt_readarg(5, ctx, &msg->msg_size); bpf_usdt_readarg_p(6, ctx, &msg->msg, MIN(msg->msg_size, MAX_MSG_DATA_LENGTH)); outbound_messages.perf_submit(ctx, msg, sizeof(*msg)); return 0; }; """ def print_message(event, inbound): print( "{} {} msg '{}' from peer {} ({}, {}) with {} bytes: {}".format( - "Warning: incomplete message (only {} out of {} bytes)!".format( - len(event.msg), - event.msg_size - ) if len(event.msg) < event.msg_size else "", + ( + "Warning: incomplete message (only {} out of {} bytes)!".format( + len(event.msg), event.msg_size + ) + if len(event.msg) < event.msg_size + else "" + ), "inbound" if inbound else "outbound", event.msg_type.decode("utf-8"), event.peer_id, event.peer_conn_type.decode("utf-8"), event.peer_addr.decode("utf-8"), event.msg_size, - bytes(event.msg[:event.msg_size]).hex(), + bytes(event.msg[: event.msg_size]).hex(), ) ) def main(bitcoind_path): bitcoind_with_usdts = USDT(path=str(bitcoind_path)) # attaching the trace functions defined in the BPF program to the # tracepoints bitcoind_with_usdts.enable_probe( - probe="inbound_message", fn_name="trace_inbound_message") + probe="inbound_message", fn_name="trace_inbound_message" + ) bitcoind_with_usdts.enable_probe( - probe="outbound_message", fn_name="trace_outbound_message") + probe="outbound_message", fn_name="trace_outbound_message" + ) bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) # BCC: perf buffer handle function for inbound_messages def handle_inbound(_, data, size): - """ Inbound message handler. + """Inbound message handler. Called each time a message is submitted to the inbound_messages BPF table.""" event = bpf["inbound_messages"].event(data) print_message(event, True) # BCC: perf buffer handle function for outbound_messages def handle_outbound(_, data, size): - """ Outbound message handler. + """Outbound message handler. Called each time a message is submitted to the outbound_messages BPF table.""" event = bpf["outbound_messages"].event(data) print_message(event, False) # BCC: add handlers to the inbound and outbound perf buffers bpf["inbound_messages"].open_perf_buffer(handle_inbound) bpf["outbound_messages"].open_perf_buffer(handle_outbound) print("Logging raw P2P messages.") print("Messages larger than about 32kb will be cut off!") print("Some messages might be lost!") while True: try: bpf.perf_buffer_poll() except KeyboardInterrupt: exit() if __name__ == "__main__": if len(sys.argv) < 2: print("USAGE:", sys.argv[0], "path/to/bitcoind") exit() path = sys.argv[1] main(path) diff --git a/contrib/tracing/log_utxocache_flush.py b/contrib/tracing/log_utxocache_flush.py index a25c13b28..f894bbfe9 100755 --- a/contrib/tracing/log_utxocache_flush.py +++ b/contrib/tracing/log_utxocache_flush.py @@ -1,109 +1,103 @@ #!/usr/bin/env python3 import ctypes import sys from bcc import BPF, USDT """Example logging Bitcoin ABC utxo set cache flushes utilizing the utxocache:flush tracepoint.""" # USAGE: ./contrib/tracing/log_utxocache_flush.py path/to/bitcoind # BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into # a sandboxed Linux kernel VM.
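# The program below reads the five utxocache:flush tracepoint arguments in
# order (duration, mode, coins count, memory usage, prune flag) and pushes
# them to user space through a perf buffer; the ctypes Data structure defined
# further down must mirror struct data_t's field order and widths so the raw
# bytes decode correctly.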
program = """ # include struct data_t { u64 duration; u32 mode; u64 coins_count; u64 coins_mem_usage; bool is_flush_for_prune; }; // BPF perf buffer to push the data to user space. BPF_PERF_OUTPUT(flush); int trace_flush(struct pt_regs *ctx) { struct data_t data = {}; bpf_usdt_readarg(1, ctx, &data.duration); bpf_usdt_readarg(2, ctx, &data.mode); bpf_usdt_readarg(3, ctx, &data.coins_count); bpf_usdt_readarg(4, ctx, &data.coins_mem_usage); bpf_usdt_readarg(5, ctx, &data.is_flush_for_prune); flush.perf_submit(ctx, &data, sizeof(data)); return 0; } """ -FLUSH_MODES = [ - 'NONE', - 'IF_NEEDED', - 'PERIODIC', - 'ALWAYS' -] +FLUSH_MODES = ["NONE", "IF_NEEDED", "PERIODIC", "ALWAYS"] class Data(ctypes.Structure): # define output data structure corresponding to struct data_t _fields_ = [ ("duration", ctypes.c_uint64), ("mode", ctypes.c_uint32), ("coins_count", ctypes.c_uint64), ("coins_mem_usage", ctypes.c_uint64), - ("is_flush_for_prune", ctypes.c_bool) + ("is_flush_for_prune", ctypes.c_bool), ] def print_event(event): - print("{:15d} {:10s} {:15d} {:15s} {:8s}".format( - event.duration, - FLUSH_MODES[event.mode], - event.coins_count, - "{:.2f} kB".format(event.coins_mem_usage / 1000), - str(event.is_flush_for_prune), - )) + print( + "{:15d} {:10s} {:15d} {:15s} {:8s}".format( + event.duration, + FLUSH_MODES[event.mode], + event.coins_count, + "{:.2f} kB".format(event.coins_mem_usage / 1000), + str(event.is_flush_for_prune), + ) + ) def main(bitcoind_path): bitcoind_with_usdts = USDT(path=str(bitcoind_path)) # attaching the trace functions defined in the BPF program # to the tracepoints - bitcoind_with_usdts.enable_probe( - probe="flush", fn_name="trace_flush") + bitcoind_with_usdts.enable_probe(probe="flush", fn_name="trace_flush") b = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) def handle_flush(_, data, size): - """ Coins Flush handler. - Called each time coin caches and indexes are flushed.""" + """Coins Flush handler. + Called each time coin caches and indexes are flushed.""" event = ctypes.cast(data, ctypes.POINTER(Data)).contents print_event(event) b["flush"].open_perf_buffer(handle_flush) print("Logging utxocache flushes. Ctrl-C to end...") print( "{:15s} {:10s} {:15s} {:15s} {:8s}".format( - "Duration (µs)", - "Mode", - "Coins Count", - "Memory Usage", - "Flush for Prune")) + "Duration (µs)", "Mode", "Coins Count", "Memory Usage", "Flush for Prune" + ) + ) while True: try: b.perf_buffer_poll() except KeyboardInterrupt: exit(0) if __name__ == "__main__": if len(sys.argv) < 2: print("USAGE: ", sys.argv[0], "path/to/bitcoind") exit(1) path = sys.argv[1] main(path) diff --git a/contrib/tracing/p2p_monitor.py b/contrib/tracing/p2p_monitor.py index a46183b2b..a08003ef6 100755 --- a/contrib/tracing/p2p_monitor.py +++ b/contrib/tracing/p2p_monitor.py @@ -1,259 +1,313 @@ #!/usr/bin/env python3 """ Interactive bitcoind P2P network traffic monitor utilizing USDT and the net:inbound_message and net:outbound_message tracepoints. """ # This script demonstrates what USDT for Bitcoin ABC can enable. It uses BCC # (https://github.com/iovisor/bcc) to load a sandboxed eBPF program into the # Linux kernel (root privileges are required). The eBPF program attaches to two # statically defined tracepoints. The tracepoint 'net:inbound_message' is called # when a new P2P message is received, and 'net:outbound_message' is called on # outbound P2P messages. The eBPF program submits the P2P messages to # this script via a BPF ring buffer. 
import curses import sys from curses import panel, wrapper from typing import List from bcc import BPF, USDT # BCC: The C program to be compiled to an eBPF program (by BCC) and loaded into # a sandboxed Linux kernel VM. program = """ #include <uapi/linux/ptrace.h> // Tor v3 addresses are 62 chars + 6 chars for the port (':12345'). // I2P addresses are 60 chars + 6 chars for the port (':12345'). #define MAX_PEER_ADDR_LENGTH 62 + 6 #define MAX_PEER_CONN_TYPE_LENGTH 20 #define MAX_MSG_TYPE_LENGTH 20 struct p2p_message { u64 peer_id; char peer_addr[MAX_PEER_ADDR_LENGTH]; char peer_conn_type[MAX_PEER_CONN_TYPE_LENGTH]; char msg_type[MAX_MSG_TYPE_LENGTH]; u64 msg_size; }; // Two BPF perf buffers for pushing data (here P2P messages) to user space. BPF_PERF_OUTPUT(inbound_messages); BPF_PERF_OUTPUT(outbound_messages); int trace_inbound_message(struct pt_regs *ctx) { struct p2p_message msg = {}; bpf_usdt_readarg(1, ctx, &msg.peer_id); bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); bpf_usdt_readarg(5, ctx, &msg.msg_size); inbound_messages.perf_submit(ctx, &msg, sizeof(msg)); return 0; }; int trace_outbound_message(struct pt_regs *ctx) { struct p2p_message msg = {}; bpf_usdt_readarg(1, ctx, &msg.peer_id); bpf_usdt_readarg_p(2, ctx, &msg.peer_addr, MAX_PEER_ADDR_LENGTH); bpf_usdt_readarg_p(3, ctx, &msg.peer_conn_type, MAX_PEER_CONN_TYPE_LENGTH); bpf_usdt_readarg_p(4, ctx, &msg.msg_type, MAX_MSG_TYPE_LENGTH); bpf_usdt_readarg(5, ctx, &msg.msg_size); outbound_messages.perf_submit(ctx, &msg, sizeof(msg)); return 0; }; """ class Message: - """ A P2P network message. """ + """A P2P network message.""" + msg_type = "" size = 0 data = bytes() inbound = False def __init__(self, msg_type, size, inbound): self.msg_type = msg_type self.size = size self.inbound = inbound class Peer: - """ A P2P network peer. """ + """A P2P network peer.""" + id = 0 address = "" connection_type = "" last_messages: List[Message] = [] total_inbound_msgs = 0 total_inbound_bytes = 0 total_outbound_msgs = 0 total_outbound_bytes = 0 def __init__(self, peer_id, address, connection_type): self.id = peer_id self.address = address self.connection_type = connection_type self.last_messages = [] def add_message(self, message): self.last_messages.append(message) if len(self.last_messages) > 25: self.last_messages.pop(0) if message.inbound: self.total_inbound_bytes += message.size self.total_inbound_msgs += 1 else: self.total_outbound_bytes += message.size self.total_outbound_msgs += 1 def main(bitcoind_path): peers = {} bitcoind_with_usdts = USDT(path=str(bitcoind_path)) # attaching the trace functions defined in the BPF program to the # tracepoints bitcoind_with_usdts.enable_probe( - probe="inbound_message", fn_name="trace_inbound_message") + probe="inbound_message", fn_name="trace_inbound_message" + ) bitcoind_with_usdts.enable_probe( - probe="outbound_message", fn_name="trace_outbound_message") + probe="outbound_message", fn_name="trace_outbound_message" + ) bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) # BCC: perf buffer handle function for inbound_messages def handle_inbound(_, data, size): - """ Inbound message handler. + """Inbound message handler.
Called each time a message is submitted to the inbound_messages BPF table.""" event = bpf["inbound_messages"].event(data) if event.peer_id not in peers: - peer = Peer(event.peer_id, event.peer_addr.decode( - "utf-8"), event.peer_conn_type.decode("utf-8")) + peer = Peer( + event.peer_id, + event.peer_addr.decode("utf-8"), + event.peer_conn_type.decode("utf-8"), + ) peers[peer.id] = peer peers[event.peer_id].add_message( - Message(event.msg_type.decode("utf-8"), event.msg_size, True)) + Message(event.msg_type.decode("utf-8"), event.msg_size, True) + ) # BCC: perf buffer handle function for outbound_messages def handle_outbound(_, data, size): - """ Outbound message handler. + """Outbound message handler. Called each time a message is submitted to the outbound_messages BPF table.""" event = bpf["outbound_messages"].event(data) if event.peer_id not in peers: - peer = Peer(event.peer_id, event.peer_addr.decode( - "utf-8"), event.peer_conn_type.decode("utf-8")) + peer = Peer( + event.peer_id, + event.peer_addr.decode("utf-8"), + event.peer_conn_type.decode("utf-8"), + ) peers[peer.id] = peer peers[event.peer_id].add_message( - Message(event.msg_type.decode("utf-8"), event.msg_size, False)) + Message(event.msg_type.decode("utf-8"), event.msg_size, False) + ) # BCC: add handlers to the inbound and outbound perf buffers bpf["inbound_messages"].open_perf_buffer(handle_inbound) bpf["outbound_messages"].open_perf_buffer(handle_outbound) wrapper(loop, bpf, peers) def loop(screen, bpf, peers): screen.nodelay(1) cur_list_pos = 0 win = curses.newwin(30, 70, 2, 7) win.erase() - win.border(ord("|"), ord("|"), ord("-"), ord("-"), - ord("-"), ord("-"), ord("-"), ord("-")) + win.border( + ord("|"), ord("|"), ord("-"), ord("-"), ord("-"), ord("-"), ord("-"), ord("-") + ) info_panel = panel.new_panel(win) info_panel.hide() ROWS_AVALIABLE_FOR_LIST = curses.LINES - 5 scroll = 0 while True: try: # BCC: poll the perf buffers for new events or timeout after 50ms bpf.perf_buffer_poll(timeout=50) ch = screen.getch() - if (ch == curses.KEY_DOWN or ch == ord("j")) and cur_list_pos < len( - peers.keys()) - 1 and info_panel.hidden(): + if ( + (ch == curses.KEY_DOWN or ch == ord("j")) + and cur_list_pos < len(peers.keys()) - 1 + and info_panel.hidden() + ): cur_list_pos += 1 if cur_list_pos >= ROWS_AVALIABLE_FOR_LIST: scroll += 1 - if ((ch == curses.KEY_UP or ch == ord("k")) - and cur_list_pos > 0 and info_panel.hidden()): + if ( + (ch == curses.KEY_UP or ch == ord("k")) + and cur_list_pos > 0 + and info_panel.hidden() + ): cur_list_pos -= 1 if scroll > 0: scroll -= 1 - if ch == ord('\n') or ch == ord(' '): + if ch == ord("\n") or ch == ord(" "): if info_panel.hidden(): info_panel.show() else: info_panel.hide() screen.erase() render( - screen, - peers, - cur_list_pos, - scroll, - ROWS_AVALIABLE_FOR_LIST, - info_panel) + screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel + ) curses.panel.update_panels() screen.refresh() except KeyboardInterrupt: exit() -def render(screen, peers, cur_list_pos, scroll, - ROWS_AVALIABLE_FOR_LIST, info_panel): - """ renders the list of peers and details panel +def render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel): + """renders the list of peers and details panel This code is unrelated to USDT, BCC and BPF. 
""" header_format = "%6s %-20s %-20s %-22s %-67s" row_format = "%6s %-5d %9d byte %-5d %9d byte %-22s %-67s" - screen.addstr(0, 1, (" P2P Message Monitor "), curses.A_REVERSE) + screen.addstr(0, 1, " P2P Message Monitor ", curses.A_REVERSE) + screen.addstr( + 1, + 0, + ( + " Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see" + " individual P2P messages" + ), + curses.A_NORMAL, + ) screen.addstr( - 1, 0, (" Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages"), curses.A_NORMAL) - screen.addstr(3, 0, - header_format % ("PEER", "OUTBOUND", "INBOUND", "TYPE", "ADDR"), curses.A_BOLD | curses.A_UNDERLINE) - peer_list = sorted(peers.keys())[scroll:ROWS_AVALIABLE_FOR_LIST + scroll] + 3, + 0, + header_format % ("PEER", "OUTBOUND", "INBOUND", "TYPE", "ADDR"), + curses.A_BOLD | curses.A_UNDERLINE, + ) + peer_list = sorted(peers.keys())[scroll : ROWS_AVALIABLE_FOR_LIST + scroll] for i, peer_id in enumerate(peer_list): peer = peers[peer_id] - screen.addstr(i + 4, 0, - row_format % (peer.id, peer.total_outbound_msgs, peer.total_outbound_bytes, - peer.total_inbound_msgs, peer.total_inbound_bytes, - peer.connection_type, peer.address), - curses.A_REVERSE if i + scroll == cur_list_pos else curses.A_NORMAL) + screen.addstr( + i + 4, + 0, + row_format + % ( + peer.id, + peer.total_outbound_msgs, + peer.total_outbound_bytes, + peer.total_inbound_msgs, + peer.total_inbound_bytes, + peer.connection_type, + peer.address, + ), + curses.A_REVERSE if i + scroll == cur_list_pos else curses.A_NORMAL, + ) if i + scroll == cur_list_pos: info_window = info_panel.window() info_window.erase() info_window.border( - ord("|"), ord("|"), ord("-"), ord("-"), - ord("-"), ord("-"), ord("-"), ord("-")) + ord("|"), + ord("|"), + ord("-"), + ord("-"), + ord("-"), + ord("-"), + ord("-"), + ord("-"), + ) info_window.addstr( - 1, 1, f"PEER {peer.id} ({peer.address})".center(68), curses.A_REVERSE | curses.A_BOLD) + 1, + 1, + f"PEER {peer.id} ({peer.address})".center(68), + curses.A_REVERSE | curses.A_BOLD, + ) info_window.addstr( - 2, 1, f" OUR NODE{peer.connection_type:^54}PEER ", - curses.A_BOLD) + 2, 1, f" OUR NODE{peer.connection_type:^54}PEER ", curses.A_BOLD + ) for i, msg in enumerate(peer.last_messages): if msg.inbound: info_window.addstr( - i + 3, 1, f"{f'<--- {msg.msg_type} ({msg.size} bytes) ':68s}", curses.A_NORMAL) + i + 3, + 1, + f"{f'<--- {msg.msg_type} ({msg.size} bytes) ':68s}", + curses.A_NORMAL, + ) else: info_window.addstr( - i + 3, 1, f" {msg.msg_type} ({msg.size} byte) --->", curses.A_NORMAL) + i + 3, + 1, + f" {msg.msg_type} ({msg.size} byte) --->", + curses.A_NORMAL, + ) if __name__ == "__main__": if len(sys.argv) < 2: print("USAGE:", sys.argv[0], "path/to/bitcoind") exit() path = sys.argv[1] main(path) diff --git a/contrib/zmq/zmq_sub.py b/contrib/zmq/zmq_sub.py index 42ce17ac4..894f6c36a 100755 --- a/contrib/zmq/zmq_sub.py +++ b/contrib/zmq/zmq_sub.py @@ -1,96 +1,99 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ ZMQ example using python3's asyncio Bitcoin should be started with the command line arguments: bitcoind -testnet -daemon \ -zmqpubrawtx=tcp://127.0.0.1:28332 \ -zmqpubrawblock=tcp://127.0.0.1:28332 \ -zmqpubhashtx=tcp://127.0.0.1:28332 \ -zmqpubhashblock=tcp://127.0.0.1:28332 \ -zmqpubsequence=tcp://127.0.0.1:28332 We use the asyncio library here. 
`self.handle()` installs itself as a future at the end of the function. Since it never returns, the event loop never runs out of pending futures, and this creates an infinite loop. An alternative is to wrap the contents of `handle` inside `while True`. A blocking example using Python 2.7 can be obtained from the git history: https://github.com/bitcoin/bitcoin/blob/37a7fe9e440b83e2364d5498931253937abe9294/contrib/zmq/zmq_sub.py """ import asyncio import binascii import signal import struct import sys import zmq.asyncio import zmq if (sys.version_info.major, sys.version_info.minor) < (3, 6): print("This example only works with Python 3.6 and greater") sys.exit(1) port = 28332 ip = "127.0.0.1" -class ZMQHandler(): +class ZMQHandler: def __init__(self): self.loop = asyncio.get_event_loop() self.zmqContext = zmq.asyncio.Context() self.zmqSubSocket = self.zmqContext.socket(zmq.SUB) self.zmqSubSocket.setsockopt(zmq.RCVHWM, 0) self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashblock") self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "hashtx") self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawblock") self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "rawtx") self.zmqSubSocket.setsockopt_string(zmq.SUBSCRIBE, "sequence") self.zmqSubSocket.connect(f"tcp://{ip}:{port}") async def handle(self): topic, body, seq = await self.zmqSubSocket.recv_multipart() sequence = "Unknown" if len(seq) == 4: - sequence = str(struct.unpack('