diff --git a/.arclint b/.arclint --- a/.arclint +++ b/.arclint @@ -19,11 +19,7 @@ "autopep8": { "type": "autopep8", "version": ">=1.3.4", - "include": "(^contrib/.*\\.py$)", - "exclude": [ - "(^contrib/gitian-builder/)", - "(^contrib/apple-sdk-tools/)" - ], + "include": "(^contrib/(buildbot|devtools)/.*\\.py$)", "flags": [ "--aggressive", "--ignore=W503,W504", @@ -37,7 +33,8 @@ "exclude": [ "(^contrib/gitian-builder/)", "(^contrib/apple-sdk-tools/)", - "(^contrib/)" + "(^contrib/devtools/)", + "(^contrib/buildbot/)" ], "flags": [ "--preview" diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py --- a/contrib/gitian-build.py +++ b/contrib/gitian-build.py @@ -12,181 +12,397 @@ def setup(): global args, workdir - programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget'] + programs = ["ruby", "git", "apt-cacher-ng", "make", "wget"] if args.kvm: - programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils'] + programs += ["python-vm-builder", "qemu-kvm", "qemu-utils"] elif args.docker: - dockers = ['docker.io', 'docker-ce'] + dockers = ["docker.io", "docker-ce"] for i in dockers: - return_code = subprocess.call( - ['sudo', 'apt-get', 'install', '-qq', i]) + return_code = subprocess.call(["sudo", "apt-get", "install", "-qq", i]) if return_code == 0: break if return_code != 0: - print('Cannot find any way to install docker', file=sys.stderr) + print("Cannot find any way to install docker", file=sys.stderr) exit(1) else: - programs += ['lxc', 'debootstrap'] - subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs) - if not os.path.isdir('gitian-builder'): + programs += ["lxc", "debootstrap"] + subprocess.check_call(["sudo", "apt-get", "install", "-qq"] + programs) + if not os.path.isdir("gitian-builder"): subprocess.check_call( - ['git', 'clone', 'https://github.com/devrandom/gitian-builder.git']) - if not os.path.isdir('bitcoin-abc'): + ["git", "clone", "https://github.com/devrandom/gitian-builder.git"] + ) + if not os.path.isdir("bitcoin-abc"): subprocess.check_call( - ['git', 'clone', 'https://github.com/Bitcoin-ABC/bitcoin-abc.git']) - os.chdir('gitian-builder') - make_image_prog = ['bin/make-base-vm', - '--distro', 'debian', '--suite', 'buster', '--arch', 'amd64'] + ["git", "clone", "https://github.com/Bitcoin-ABC/bitcoin-abc.git"] + ) + os.chdir("gitian-builder") + make_image_prog = [ + "bin/make-base-vm", + "--distro", + "debian", + "--suite", + "buster", + "--arch", + "amd64", + ] if args.docker: - make_image_prog += ['--docker'] + make_image_prog += ["--docker"] elif not args.kvm: - make_image_prog += ['--lxc'] + make_image_prog += ["--lxc"] subprocess.check_call(make_image_prog) os.chdir(workdir) if args.is_bionic and not args.kvm and not args.docker: subprocess.check_call( - ['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) - print('Reboot is required') + ["sudo", "sed", "-i", "s/lxcbr0/br0/", "/etc/default/lxc-net"] + ) + print("Reboot is required") exit(0) def build(): global args, workdir - base_output_dir = 'bitcoin-binaries/' + args.version - os.makedirs(base_output_dir + '/src', exist_ok=True) - print('\nBuilding Dependencies\n') - os.chdir('gitian-builder') - os.makedirs('inputs', exist_ok=True) - - subprocess.check_call(['make', '-C', '../bitcoin-abc/depends', - 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common']) - - output_dir_src = '../' + base_output_dir + '/src' + base_output_dir = "bitcoin-binaries/" + args.version + os.makedirs(base_output_dir + "/src", exist_ok=True) + print("\nBuilding Dependencies\n") + 
os.chdir("gitian-builder") + os.makedirs("inputs", exist_ok=True) + + subprocess.check_call( + [ + "make", + "-C", + "../bitcoin-abc/depends", + "download", + "SOURCES_PATH=" + os.getcwd() + "/cache/common", + ] + ) + + output_dir_src = "../" + base_output_dir + "/src" if args.linux: - print('\nCompiling ' + args.version + ' Linux') - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin=' + args.commit, - '--url', 'bitcoin=' + args.url, '../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + - '-linux', '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml']) - output_dir_linux = '../' + base_output_dir + '/linux' + print("\nCompiling " + args.version + " Linux") + subprocess.check_call( + [ + "bin/gbuild", + "-j", + args.jobs, + "-m", + args.memory, + "--commit", + "bitcoin=" + args.commit, + "--url", + "bitcoin=" + args.url, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-linux", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml", + ] + ) + output_dir_linux = "../" + base_output_dir + "/linux" os.makedirs(output_dir_linux, exist_ok=True) subprocess.check_call( - 'mv build/out/bitcoin-*.tar.gz ' + output_dir_linux, shell=True) + "mv build/out/bitcoin-*.tar.gz " + output_dir_linux, shell=True + ) subprocess.check_call( - 'mv build/out/src/bitcoin-*.tar.gz ' + output_dir_src, shell=True) + "mv build/out/src/bitcoin-*.tar.gz " + output_dir_src, shell=True + ) subprocess.check_call( - 'mv result/bitcoin-*-linux-res.yml ' + output_dir_linux, shell=True) + "mv result/bitcoin-*-linux-res.yml " + output_dir_linux, shell=True + ) if args.windows: - print('\nCompiling ' + args.version + ' Windows') - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin=' + args.commit, - '--url', 'bitcoin=' + args.url, '../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + - '-win-unsigned', '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml']) - output_dir_win = '../' + base_output_dir + '/win' + print("\nCompiling " + args.version + " Windows") + subprocess.check_call( + [ + "bin/gbuild", + "-j", + args.jobs, + "-m", + args.memory, + "--commit", + "bitcoin=" + args.commit, + "--url", + "bitcoin=" + args.url, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-win-unsigned", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml", + ] + ) + output_dir_win = "../" + base_output_dir + "/win" os.makedirs(output_dir_win, exist_ok=True) subprocess.check_call( - 'mv build/out/bitcoin-*-win-unsigned.tar.gz inputs/', shell=True) + "mv build/out/bitcoin-*-win-unsigned.tar.gz inputs/", shell=True + ) subprocess.check_call( - 'mv build/out/bitcoin-*.zip build/out/bitcoin-*.exe ' + output_dir_win, shell=True) + "mv build/out/bitcoin-*.zip build/out/bitcoin-*.exe " + output_dir_win, + shell=True, + ) subprocess.check_call( - 'mv 
build/out/src/bitcoin-*.tar.gz ' + output_dir_src, shell=True) + "mv build/out/src/bitcoin-*.tar.gz " + output_dir_src, shell=True + ) subprocess.check_call( - 'mv result/bitcoin-*-win-res.yml ' + output_dir_win, shell=True) + "mv result/bitcoin-*-win-res.yml " + output_dir_win, shell=True + ) if args.macos: - print('\nCompiling ' + args.version + ' MacOS') - subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitcoin=' + args.commit, - '--url', 'bitcoin=' + args.url, '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + - '-osx-unsigned', '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml']) - output_dir_osx = '../' + base_output_dir + '/osx' + print("\nCompiling " + args.version + " MacOS") + subprocess.check_call( + [ + "bin/gbuild", + "-j", + args.jobs, + "-m", + args.memory, + "--commit", + "bitcoin=" + args.commit, + "--url", + "bitcoin=" + args.url, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-osx-unsigned", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml", + ] + ) + output_dir_osx = "../" + base_output_dir + "/osx" os.makedirs(output_dir_osx, exist_ok=True) subprocess.check_call( - 'mv build/out/bitcoin-*-osx-unsigned.tar.gz inputs/', shell=True) + "mv build/out/bitcoin-*-osx-unsigned.tar.gz inputs/", shell=True + ) subprocess.check_call( - 'mv build/out/bitcoin-*.tar.gz build/out/bitcoin-*.dmg ' + output_dir_osx, shell=True) + "mv build/out/bitcoin-*.tar.gz build/out/bitcoin-*.dmg " + output_dir_osx, + shell=True, + ) subprocess.check_call( - 'mv build/out/src/bitcoin-*.tar.gz ' + output_dir_src, shell=True) + "mv build/out/src/bitcoin-*.tar.gz " + output_dir_src, shell=True + ) subprocess.check_call( - 'mv result/bitcoin-*-osx-res.yml ' + output_dir_osx, shell=True) + "mv result/bitcoin-*-osx-res.yml " + output_dir_osx, shell=True + ) os.chdir(workdir) if args.commit_files: - print('\nCommitting ' + args.version + ' Unsigned Sigs\n') - os.chdir('gitian.sigs') + print("\nCommitting " + args.version + " Unsigned Sigs\n") + os.chdir("gitian.sigs") + subprocess.check_call(["git", "add", args.version + "-linux/" + args.signer]) subprocess.check_call( - ['git', 'add', args.version + '-linux/' + args.signer]) + ["git", "add", args.version + "-win-unsigned/" + args.signer] + ) subprocess.check_call( - ['git', 'add', args.version + '-win-unsigned/' + args.signer]) + ["git", "add", args.version + "-osx-unsigned/" + args.signer] + ) subprocess.check_call( - ['git', 'add', args.version + '-osx-unsigned/' + args.signer]) - subprocess.check_call( - ['git', 'commit', '-m', 'Add ' + args.version + ' unsigned sigs for ' + args.signer]) + [ + "git", + "commit", + "-m", + "Add " + args.version + " unsigned sigs for " + args.signer, + ] + ) os.chdir(workdir) def sign(): global args, workdir - os.chdir('gitian-builder') + os.chdir("gitian-builder") if args.windows: - print('\nSigning ' + args.version + ' Windows') - subprocess.check_call('cp inputs/bitcoin-' + args.version + - '-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz', shell=True) - subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature=' + args.commit, - '../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml']) - 
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + '-win-signed', - '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml']) + print("\nSigning " + args.version + " Windows") + subprocess.check_call( + "cp inputs/bitcoin-" + + args.version + + "-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz", + shell=True, + ) + subprocess.check_call( + [ + "bin/gbuild", + "-i", + "--commit", + "signature=" + args.commit, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-win-signed", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml", + ] + ) subprocess.check_call( - 'mv build/out/bitcoin-*win64-setup.exe ../bitcoin-binaries/' + args.version, shell=True) + "mv build/out/bitcoin-*win64-setup.exe ../bitcoin-binaries/" + args.version, + shell=True, + ) if args.macos: - print('\nSigning ' + args.version + ' MacOS') - subprocess.check_call('cp inputs/bitcoin-' + args.version + - '-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz', shell=True) - subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature=' + args.commit, - '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml']) - subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version + '-osx-signed', - '--destination', '../gitian.sigs/', '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml']) - subprocess.check_call('mv build/out/bitcoin-osx-signed.dmg ../bitcoin-binaries/' + - args.version + '/bitcoin-' + args.version + '-osx.dmg', shell=True) + print("\nSigning " + args.version + " MacOS") + subprocess.check_call( + "cp inputs/bitcoin-" + + args.version + + "-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz", + shell=True, + ) + subprocess.check_call( + [ + "bin/gbuild", + "-i", + "--commit", + "signature=" + args.commit, + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml", + ] + ) + subprocess.check_call( + [ + "bin/gsign", + "-p", + args.sign_prog, + "--signer", + args.signer, + "--release", + args.version + "-osx-signed", + "--destination", + "../gitian.sigs/", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml", + ] + ) + subprocess.check_call( + "mv build/out/bitcoin-osx-signed.dmg ../bitcoin-binaries/" + + args.version + + "/bitcoin-" + + args.version + + "-osx.dmg", + shell=True, + ) os.chdir(workdir) if args.commit_files: - print('\nCommitting ' + args.version + ' Signed Sigs\n') - os.chdir('gitian.sigs') + print("\nCommitting " + args.version + " Signed Sigs\n") + os.chdir("gitian.sigs") subprocess.check_call( - ['git', 'add', args.version + '-win-signed/' + args.signer]) + ["git", "add", args.version + "-win-signed/" + args.signer] + ) subprocess.check_call( - ['git', 'add', args.version + '-osx-signed/' + args.signer]) - subprocess.check_call(['git', 'commit', '-a', '-m', 'Add ' + - args.version + ' signed binary sigs for ' + args.signer]) + ["git", "add", args.version + "-osx-signed/" + args.signer] + ) + subprocess.check_call( + [ + "git", + "commit", + "-a", + "-m", + "Add " + args.version + " signed binary sigs for " + args.signer, + ] + ) os.chdir(workdir) def verify(): global args, workdir - os.chdir('gitian-builder') - - print('\nVerifying v' + args.version + ' Linux\n') - 
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-linux', '../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml']) - print('\nVerifying v' + args.version + ' Windows\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-win-unsigned', '../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml']) - print('\nVerifying v' + args.version + ' MacOS\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-osx-unsigned', '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml']) - print('\nVerifying v' + args.version + ' Signed Windows\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-win-signed', '../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml']) - print('\nVerifying v' + args.version + ' Signed MacOS\n') - subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version + - '-osx-signed', '../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml']) + os.chdir("gitian-builder") + + print("\nVerifying v" + args.version + " Linux\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-linux", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-linux.yml", + ] + ) + print("\nVerifying v" + args.version + " Windows\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-win-unsigned", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win.yml", + ] + ) + print("\nVerifying v" + args.version + " MacOS\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-osx-unsigned", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx.yml", + ] + ) + print("\nVerifying v" + args.version + " Signed Windows\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-win-signed", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-win-signer.yml", + ] + ) + print("\nVerifying v" + args.version + " Signed MacOS\n") + subprocess.check_call( + [ + "bin/gverify", + "-v", + "-d", + "../gitian.sigs/", + "-r", + args.version + "-osx-signed", + "../bitcoin-abc/contrib/gitian-descriptors/gitian-osx-signer.yml", + ] + ) os.chdir(workdir) @@ -195,110 +411,195 @@ global args, workdir num_cpus = multiprocessing.cpu_count() - parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version') - parser.add_argument('-c', '--commit', action='store_true', dest='commit', - help='Indicate that the version argument is for a commit or branch') - parser.add_argument('-p', '--pull', action='store_true', dest='pull', - help='Indicate that the version argument is the number of a github repository pull request') - parser.add_argument('-u', '--url', dest='url', default='https://github.com/Bitcoin-ABC/bitcoin-abc.git', - help='Specify the URL of the repository. 
Default is %(default)s') - parser.add_argument('-v', '--verify', action='store_true', - dest='verify', help='Verify the Gitian build') - parser.add_argument('-b', '--build', action='store_true', - dest='build', help='Do a Gitian build') - parser.add_argument('-s', '--sign', action='store_true', dest='sign', - help='Make signed binaries for Windows and MacOS') - parser.add_argument('-B', '--buildsign', action='store_true', - dest='buildsign', help='Build both signed and unsigned binaries') - parser.add_argument('-o', '--os', dest='os', default='lwm', - help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS') - parser.add_argument('-j', '--jobs', dest='jobs', default=str(num_cpus), - help='Number of processes to use. Default %(default)s') - parser.add_argument('-m', '--memory', dest='memory', default='3500', - help='Memory to allocate in MiB. Default %(default)s') - parser.add_argument('-k', '--kvm', action='store_true', - dest='kvm', help='Use KVM instead of LXC') - parser.add_argument('-d', '--docker', action='store_true', - dest='docker', help='Use Docker instead of LXC') - parser.add_argument('-S', '--setup', action='store_true', dest='setup', - help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)') - parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', - help='Create the assert file for detached signing. Will not commit anything.') - parser.add_argument('-n', '--no-commit', action='store_false', - dest='commit_files', help='Do not commit anything to git') + parser = argparse.ArgumentParser(usage="%(prog)s [options] signer version") + parser.add_argument( + "-c", + "--commit", + action="store_true", + dest="commit", + help="Indicate that the version argument is for a commit or branch", + ) + parser.add_argument( + "-p", + "--pull", + action="store_true", + dest="pull", + help=( + "Indicate that the version argument is the number of a github repository" + " pull request" + ), + ) + parser.add_argument( + "-u", + "--url", + dest="url", + default="https://github.com/Bitcoin-ABC/bitcoin-abc.git", + help="Specify the URL of the repository. Default is %(default)s", + ) + parser.add_argument( + "-v", + "--verify", + action="store_true", + dest="verify", + help="Verify the Gitian build", + ) + parser.add_argument( + "-b", "--build", action="store_true", dest="build", help="Do a Gitian build" + ) + parser.add_argument( + "-s", + "--sign", + action="store_true", + dest="sign", + help="Make signed binaries for Windows and MacOS", + ) + parser.add_argument( + "-B", + "--buildsign", + action="store_true", + dest="buildsign", + help="Build both signed and unsigned binaries", + ) + parser.add_argument( + "-o", + "--os", + dest="os", + default="lwm", + help=( + "Specify which Operating Systems the build is for. Default is %(default)s." + " l for Linux, w for Windows, m for MacOS" + ), + ) + parser.add_argument( + "-j", + "--jobs", + dest="jobs", + default=str(num_cpus), + help="Number of processes to use. Default %(default)s", + ) + parser.add_argument( + "-m", + "--memory", + dest="memory", + default="3500", + help="Memory to allocate in MiB. 
Default %(default)s", + ) + parser.add_argument( + "-k", "--kvm", action="store_true", dest="kvm", help="Use KVM instead of LXC" + ) + parser.add_argument( + "-d", + "--docker", + action="store_true", + dest="docker", + help="Use Docker instead of LXC", + ) + parser.add_argument( + "-S", + "--setup", + action="store_true", + dest="setup", + help=( + "Set up the Gitian building environment. Uses LXC. If you want to use KVM," + " use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)" + ), + ) + parser.add_argument( + "-D", + "--detach-sign", + action="store_true", + dest="detach_sign", + help="Create the assert file for detached signing. Will not commit anything.", + ) parser.add_argument( - 'signer', help='GPG signer to sign each build assert file') + "-n", + "--no-commit", + action="store_false", + dest="commit_files", + help="Do not commit anything to git", + ) + parser.add_argument("signer", help="GPG signer to sign each build assert file") parser.add_argument( - 'version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified') + "version", + help=( + "Version number, commit, or branch to build. If building a commit or" + " branch, the -c option must be specified" + ), + ) args = parser.parse_args() workdir = os.getcwd() - args.linux = 'l' in args.os - args.windows = 'w' in args.os - args.macos = 'm' in args.os + args.linux = "l" in args.os + args.windows = "w" in args.os + args.macos = "m" in args.os - args.is_bionic = b'bionic' in subprocess.check_output( - ['lsb_release', '-cs']) + args.is_bionic = b"bionic" in subprocess.check_output(["lsb_release", "-cs"]) if args.buildsign: args.build = True args.sign = True if args.kvm and args.docker: - raise Exception('Error: cannot have both kvm and docker') + raise Exception("Error: cannot have both kvm and docker") - args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign' + args.sign_prog = "true" if args.detach_sign else "gpg --detach-sign" # Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know # that we use lxc or docker if args.docker: - os.environ['USE_DOCKER'] = '1' + os.environ["USE_DOCKER"] = "1" elif not args.kvm: - os.environ['USE_LXC'] = '1' - if 'GITIAN_HOST_IP' not in os.environ.keys(): - os.environ['GITIAN_HOST_IP'] = '10.0.3.1' - if 'LXC_GUEST_IP' not in os.environ.keys(): - os.environ['LXC_GUEST_IP'] = '10.0.3.5' + os.environ["USE_LXC"] = "1" + if "GITIAN_HOST_IP" not in os.environ.keys(): + os.environ["GITIAN_HOST_IP"] = "10.0.3.1" + if "LXC_GUEST_IP" not in os.environ.keys(): + os.environ["LXC_GUEST_IP"] = "10.0.3.5" # Disable for MacOS if no SDK found if args.macos and not os.path.isfile( - 'gitian-builder/inputs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz'): - print('Cannot build for MacOS, SDK does not exist. Will build for other OSes') + "gitian-builder/inputs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz" + ): + print("Cannot build for MacOS, SDK does not exist. 
Will build for other OSes") args.macos = False script_name = os.path.basename(sys.argv[0]) # Signer and version shouldn't be empty - if args.signer == '': - print(script_name + ': Missing signer.') - print('Try ' + script_name + ' --help for more information') + if args.signer == "": + print(script_name + ": Missing signer.") + print("Try " + script_name + " --help for more information") exit(1) - if args.version == '': - print(script_name + ': Missing version.') - print('Try ' + script_name + ' --help for more information') + if args.version == "": + print(script_name + ": Missing version.") + print("Try " + script_name + " --help for more information") exit(1) # Add leading 'v' for tags if args.commit and args.pull: - raise Exception('Cannot have both commit and pull') - args.commit = ('' if args.commit else 'v') + args.version + raise Exception("Cannot have both commit and pull") + args.commit = ("" if args.commit else "v") + args.version if args.setup: setup() - os.chdir('bitcoin-abc') + os.chdir("bitcoin-abc") if args.pull: subprocess.check_call( - ['git', 'fetch', args.url, 'refs/pull/' + args.version + '/merge']) - os.chdir('../gitian-builder/inputs/bitcoin') + ["git", "fetch", args.url, "refs/pull/" + args.version + "/merge"] + ) + os.chdir("../gitian-builder/inputs/bitcoin") subprocess.check_call( - ['git', 'fetch', args.url, 'refs/pull/' + args.version + '/merge']) + ["git", "fetch", args.url, "refs/pull/" + args.version + "/merge"] + ) args.commit = subprocess.check_output( - ['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip() - args.version = 'pull-' + args.version + ["git", "show", "-s", "--format=%H", "FETCH_HEAD"], + universal_newlines=True, + encoding="utf8", + ).strip() + args.version = "pull-" + args.version print(args.commit) - subprocess.check_call(['git', 'fetch']) - subprocess.check_call(['git', 'checkout', args.commit]) + subprocess.check_call(["git", "fetch"]) + subprocess.check_call(["git", "checkout", args.commit]) os.chdir(workdir) if args.build: @@ -311,5 +612,5 @@ verify() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/contrib/linearize/linearize-data.py b/contrib/linearize/linearize-data.py --- a/contrib/linearize/linearize-data.py +++ b/contrib/linearize/linearize-data.py @@ -25,34 +25,40 @@ def hex_switchEndian(s): - """ Switches the endianness of a hex string (in pairs of hex chars) """ - pairList = [s[i:i + 2].encode() for i in range(0, len(s), 2)] - return b''.join(pairList[::-1]).decode() + """Switches the endianness of a hex string (in pairs of hex chars)""" + pairList = [s[i : i + 2].encode() for i in range(0, len(s), 2)] + return b"".join(pairList[::-1]).decode() def uint32(x): - return x & 0xffffffff + return x & 0xFFFFFFFF def bytereverse(x): - return uint32((((x) << 24) | (((x) << 8) & 0x00ff0000) | - (((x) >> 8) & 0x0000ff00) | ((x) >> 24))) + return uint32( + ( + ((x) << 24) + | (((x) << 8) & 0x00FF0000) + | (((x) >> 8) & 0x0000FF00) + | ((x) >> 24) + ) + ) def bufreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): - word = struct.unpack('@I', in_buf[i:i + 4])[0] - out_words.append(struct.pack('@I', bytereverse(word))) - return b''.join(out_words) + word = struct.unpack("@I", in_buf[i : i + 4])[0] + out_words.append(struct.pack("@I", bytereverse(word))) + return b"".join(out_words) def wordreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): - out_words.append(in_buf[i:i + 4]) + out_words.append(in_buf[i : i + 4]) out_words.reverse() - 
return b''.join(out_words) + return b"".join(out_words) def calc_hdr_hash(blk_hdr): @@ -76,21 +82,22 @@ def get_blk_dt(blk_hdr): - members = struct.unpack(" self.maxOutSz): + if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz): self.outF.close() if self.setFileTime: os.utime(self.outFname, (int(time.time()), self.highTS)) @@ -159,8 +165,7 @@ (blkDate, blkTS) = get_blk_dt(blk_hdr) if self.timestampSplit and (blkDate > self.lastDate): - print("New month " + blkDate.strftime("%Y-%m") + - " @ " + self.hash_str) + print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str) self.lastDate = blkDate if self.outF: self.outF.close() @@ -173,10 +178,11 @@ if not self.outF: if self.fileOutput: - self.outFname = self.settings['output_file'] + self.outFname = self.settings["output_file"] else: self.outFname = os.path.join( - self.settings['output'], f"blk{self.outFn:05d}.dat") + self.settings["output"], f"blk{self.outFn:05d}.dat" + ) print("Output file " + self.outFname) self.outF = open(self.outFname, "wb") @@ -190,20 +196,26 @@ self.highTS = blkTS if (self.blkCountOut % 1000) == 0: - print('{} blocks scanned, {} blocks written (of {}, {:.1f}% complete)'.format( - self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex))) + print( + "{} blocks scanned, {} blocks written (of {}, {:.1f}% complete)".format( + self.blkCountIn, + self.blkCountOut, + len(self.blkindex), + 100.0 * self.blkCountOut / len(self.blkindex), + ) + ) def inFileName(self, fn): - return os.path.join(self.settings['input'], f"blk{fn:05d}.dat") + return os.path.join(self.settings["input"], f"blk{fn:05d}.dat") def fetchBlock(self, extent): - '''Fetch block contents from disk given extents''' + """Fetch block contents from disk given extents""" with open(self.inFileName(extent.fn), "rb") as f: f.seek(extent.offset) return f.read(extent.size) def copyOneBlock(self): - '''Find the next block to be written in the input, and copy it to the output.''' + """Find the next block to be written in the input, and copy it to the output.""" extent = self.blockExtents.pop(self.blkCountOut) if self.blkCountOut in self.outOfOrderData: # If the data is cached, use it from memory and remove from the @@ -227,29 +239,28 @@ return inhdr = self.inF.read(8) - if (not inhdr or (inhdr[0] == "\0")): + if not inhdr or (inhdr[0] == "\0"): self.inF.close() self.inF = None self.inFn = self.inFn + 1 continue inMagic = inhdr[:4] - if (inMagic != self.settings['netmagic']): + if inMagic != self.settings["netmagic"]: print("Invalid magic: " + inMagic.hex()) return inLenLE = inhdr[4:] su = struct.unpack(" Optional['FrameworkInfo']: + def fromOtoolLibraryLine(cls, line: str) -> Optional["FrameworkInfo"]: # Note: line must be trimmed if line == "": return None # Don't deploy system libraries (exception for libQtuitools and # libQtlucene). 
- if line.startswith("/System/Library/") or line.startswith( - "@executable_path") or (line.startswith("/usr/lib/") and "libQt" not in line): + if ( + line.startswith("/System/Library/") + or line.startswith("@executable_path") + or (line.startswith("/usr/lib/") and "libQt" not in line) + ): return None m = cls.reOLine.match(line) @@ -118,7 +122,9 @@ info.version = "-" info.installName = path - info.deployedInstallName = "@executable_path/../Frameworks/" + info.binaryName + info.deployedInstallName = ( + "@executable_path/../Frameworks/" + info.binaryName + ) info.sourceFilePath = path info.destinationDirectory = cls.bundleFrameworkDirectory else: @@ -131,38 +137,44 @@ i += 1 if i == len(parts): raise RuntimeError( - "Could not find .framework or .dylib in otool line: " + line) + "Could not find .framework or .dylib in otool line: " + line + ) info.frameworkName = parts[i] info.frameworkDirectory = "/".join(parts[:i]) info.frameworkPath = os.path.join( - info.frameworkDirectory, info.frameworkName) + info.frameworkDirectory, info.frameworkName + ) info.binaryName = parts[i + 3] - info.binaryDirectory = "/".join(parts[i + 1:i + 3]) - info.binaryPath = os.path.join( - info.binaryDirectory, info.binaryName) + info.binaryDirectory = "/".join(parts[i + 1 : i + 3]) + info.binaryPath = os.path.join(info.binaryDirectory, info.binaryName) info.version = parts[i + 2] - info.deployedInstallName = "@executable_path/../Frameworks/" + \ - os.path.join(info.frameworkName, info.binaryPath) + info.deployedInstallName = "@executable_path/../Frameworks/" + os.path.join( + info.frameworkName, info.binaryPath + ) info.destinationDirectory = os.path.join( - cls.bundleFrameworkDirectory, info.frameworkName, info.binaryDirectory) + cls.bundleFrameworkDirectory, info.frameworkName, info.binaryDirectory + ) info.sourceResourcesDirectory = os.path.join( - info.frameworkPath, "Resources") - info.sourceContentsDirectory = os.path.join( - info.frameworkPath, "Contents") + info.frameworkPath, "Resources" + ) + info.sourceContentsDirectory = os.path.join(info.frameworkPath, "Contents") info.sourceVersionContentsDirectory = os.path.join( - info.frameworkPath, "Versions", info.version, "Contents") + info.frameworkPath, "Versions", info.version, "Contents" + ) info.destinationResourcesDirectory = os.path.join( - cls.bundleFrameworkDirectory, info.frameworkName, "Resources") + cls.bundleFrameworkDirectory, info.frameworkName, "Resources" + ) info.destinationVersionContentsDirectory = os.path.join( cls.bundleFrameworkDirectory, info.frameworkName, "Versions", info.version, - "Contents") + "Contents", + ) return info @@ -214,19 +226,18 @@ if verbose >= 3: print("Inspecting with otool: " + binaryPath) otoolbin = os.getenv("OTOOL", "otool") - otool = subprocess.Popen([otoolbin, - "-L", - binaryPath], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) + otool = subprocess.Popen( + [otoolbin, "-L", binaryPath], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) o_stdout, o_stderr = otool.communicate() if otool.returncode != 0: if verbose >= 1: sys.stderr.write(o_stderr) sys.stderr.flush() - raise RuntimeError( - f"otool failed with return code {otool.returncode}") + raise RuntimeError(f"otool failed with return code {otool.returncode}") otoolLines = o_stdout.split("\n") otoolLines.pop(0) # First line is the inspected binary @@ -252,8 +263,7 @@ subprocess.check_call([installnametoolbin, "-" + action] + list(args)) -def changeInstallName(oldName: str, newName: str, - 
binaryPath: str, verbose: int): +def changeInstallName(oldName: str, newName: str, binaryPath: str, verbose: int): if verbose >= 3: print("Using install_name_tool:") print(" in", binaryPath) @@ -278,8 +288,7 @@ subprocess.check_call([stripbin, "-x", binaryPath]) -def copyFramework(framework: FrameworkInfo, path: str, - verbose: int) -> Optional[str]: +def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional[str]: if framework.sourceFilePath.startswith("Qt"): # standard place for Nokia Qt installer's frameworks fromPath = "/Library/Frameworks/" + framework.sourceFilePath @@ -307,14 +316,14 @@ os.chmod(toPath, permissions.st_mode | stat.S_IWRITE) if not framework.isDylib(): # Copy resources for real frameworks - linkfrom = os.path.join( path, "Contents", "Frameworks", framework.frameworkName, "Versions", - "Current") + "Current", + ) linkto = framework.version if not os.path.exists(linkfrom): os.symlink(linkto, linkfrom) @@ -322,8 +331,7 @@ print("Linked:", linkfrom, "->", linkto) fromResourcesDir = framework.sourceResourcesDirectory if os.path.exists(fromResourcesDir): - toResourcesDir = os.path.join( - path, framework.destinationResourcesDirectory) + toResourcesDir = os.path.join(path, framework.destinationResourcesDirectory) shutil.copytree(fromResourcesDir, toResourcesDir, symlinks=True) if verbose >= 3: print("Copied resources:", fromResourcesDir) @@ -333,7 +341,8 @@ fromContentsDir = framework.sourceContentsDirectory if os.path.exists(fromContentsDir): toContentsDir = os.path.join( - path, framework.destinationVersionContentsDirectory) + path, framework.destinationVersionContentsDirectory + ) shutil.copytree(fromContentsDir, toContentsDir, symlinks=True) if verbose >= 3: print("Copied Contents:", fromContentsDir) @@ -341,15 +350,17 @@ # Copy qt_menu.nib (applies to non-framework layout) elif framework.frameworkName.startswith("libQtGui"): qtMenuNibSourcePath = os.path.join( - framework.frameworkDirectory, "Resources", "qt_menu.nib") + framework.frameworkDirectory, "Resources", "qt_menu.nib" + ) qtMenuNibDestinationPath = os.path.join( - path, "Contents", "Resources", "qt_menu.nib") + path, "Contents", "Resources", "qt_menu.nib" + ) if os.path.exists(qtMenuNibSourcePath) and not os.path.exists( - qtMenuNibDestinationPath): + qtMenuNibDestinationPath + ): shutil.copytree( - qtMenuNibSourcePath, - qtMenuNibDestinationPath, - symlinks=True) + qtMenuNibSourcePath, qtMenuNibDestinationPath, symlinks=True + ) if verbose >= 3: print("Copied for libQtGui:", qtMenuNibSourcePath) print(" to:", qtMenuNibDestinationPath) @@ -357,8 +368,14 @@ return toPath -def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPath: str, strip: bool, - verbose: int, deploymentInfo: Optional[DeploymentInfo] = None) -> DeploymentInfo: +def deployFrameworks( + frameworks: List[FrameworkInfo], + bundlePath: str, + binaryPath: str, + strip: bool, + verbose: int, + deploymentInfo: Optional[DeploymentInfo] = None, +) -> DeploymentInfo: if deploymentInfo is None: deploymentInfo = DeploymentInfo() @@ -374,17 +391,16 @@ deploymentInfo.detectQtPath(framework.frameworkDirectory) if framework.installName.startswith( - "@executable_path") or framework.installName.startswith(bundlePath): + "@executable_path" + ) or framework.installName.startswith(bundlePath): if verbose >= 2: print(framework.frameworkName, "already deployed, skipping.") continue # install_name_tool the new id into the binary changeInstallName( - framework.installName, - framework.deployedInstallName, - binaryPath, - 
verbose) + framework.installName, framework.deployedInstallName, binaryPath, verbose + ) # Copy framework to app bundle. deployedBinaryPath = copyFramework(framework, bundlePath, verbose) @@ -396,10 +412,7 @@ runStrip(deployedBinaryPath, verbose) # install_name_tool it a new id. - changeIdentification( - framework.deployedInstallName, - deployedBinaryPath, - verbose) + changeIdentification(framework.deployedInstallName, deployedBinaryPath, verbose) # Check for framework dependencies dependencies = getFrameworks(deployedBinaryPath, verbose) @@ -408,30 +421,46 @@ dependency.installName, dependency.deployedInstallName, deployedBinaryPath, - verbose) + verbose, + ) # Deploy framework if necessary. - if dependency.frameworkName not in deploymentInfo.deployedFrameworks and dependency not in frameworks: + if ( + dependency.frameworkName not in deploymentInfo.deployedFrameworks + and dependency not in frameworks + ): frameworks.append(dependency) return deploymentInfo def deployFrameworksForAppBundle( - applicationBundle: ApplicationBundleInfo, strip: bool, verbose: int) -> DeploymentInfo: + applicationBundle: ApplicationBundleInfo, strip: bool, verbose: int +) -> DeploymentInfo: frameworks = getFrameworks(applicationBundle.binaryPath, verbose) if len(frameworks) == 0 and verbose >= 1: print( "Warning: Could not find any external frameworks to deploy in {}.".format( - applicationBundle.path)) + applicationBundle.path + ) + ) return DeploymentInfo() else: return deployFrameworks( - frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose) - - -def deployPlugins(appBundleInfo: ApplicationBundleInfo, - deploymentInfo: DeploymentInfo, strip: bool, verbose: int): + frameworks, + applicationBundle.path, + applicationBundle.binaryPath, + strip, + verbose, + ) + + +def deployPlugins( + appBundleInfo: ApplicationBundleInfo, + deploymentInfo: DeploymentInfo, + strip: bool, + verbose: int, +): # Lookup available plugins, exclude unneeded plugins = [] if deploymentInfo.pluginPath is None: @@ -523,7 +552,10 @@ if pluginName.endswith("_debug.dylib"): # Skip debug plugins continue - elif pluginPath == "imageformats/libqsvg.dylib" or pluginPath == "iconengines/libqsvgicon.dylib": + elif ( + pluginPath == "imageformats/libqsvg.dylib" + or pluginPath == "iconengines/libqsvgicon.dylib" + ): # Deploy the svg plugins only if QtSvg is in use if not deploymentInfo.usesFramework("QtSvg"): continue @@ -552,19 +584,12 @@ for pluginDirectory, pluginName in plugins: if verbose >= 2: - print( - "Processing plugin", - os.path.join( - pluginDirectory, - pluginName), - "...") + print("Processing plugin", os.path.join(pluginDirectory, pluginName), "...") sourcePath = os.path.join( - deploymentInfo.pluginPath, - pluginDirectory, - pluginName) - destinationDirectory = os.path.join( - appBundleInfo.pluginPath, pluginDirectory) + deploymentInfo.pluginPath, pluginDirectory, pluginName + ) + destinationDirectory = os.path.join(appBundleInfo.pluginPath, pluginDirectory) if not os.path.exists(destinationDirectory): os.makedirs(destinationDirectory) @@ -584,7 +609,8 @@ dependency.installName, dependency.deployedInstallName, destinationPath, - verbose) + verbose, + ) # Deploy framework if necessary. if dependency.frameworkName not in deploymentInfo.deployedFrameworks: @@ -594,7 +620,8 @@ destinationPath, strip, verbose, - deploymentInfo) + deploymentInfo, + ) qt_conf = """[Paths] @@ -602,7 +629,8 @@ Plugins=PlugIns """ -ap = ArgumentParser(description="""Improved version of macdeployqt. 
+ap = ArgumentParser( + description="""Improved version of macdeployqt. Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .dmg file. Note, that the "dist" folder will be deleted before deploying on each run. @@ -611,71 +639,98 @@ Also optionally signs the .app bundle; set the CODESIGNARGS environment variable to pass arguments to the codesign tool. -E.g. CODESIGNARGS='--sign "Developer ID Application: ..." --keychain /encrypted/foo.keychain'""") +E.g. CODESIGNARGS='--sign "Developer ID Application: ..." --keychain /encrypted/foo.keychain'""" +) -ap.add_argument("app_bundle", nargs=1, metavar="app-bundle", - help="application bundle to be deployed") +ap.add_argument( + "app_bundle", + nargs=1, + metavar="app-bundle", + help="application bundle to be deployed", +) ap.add_argument( "-verbose", type=int, nargs=1, default=[1], metavar="<0-3>", - help="0 = no output, 1 = error/warning (default), 2 = normal, 3 = debug") + help="0 = no output, 1 = error/warning (default), 2 = normal, 3 = debug", +) ap.add_argument( "-no-plugins", dest="plugins", action="store_false", default=True, - help="skip plugin deployment") + help="skip plugin deployment", +) ap.add_argument( "-no-strip", dest="strip", action="store_false", default=True, - help="don't run 'strip' on the binaries") + help="don't run 'strip' on the binaries", +) ap.add_argument( "-sign", dest="sign", action="store_true", default=False, - help="sign .app bundle with codesign tool") + help="sign .app bundle with codesign tool", +) ap.add_argument( "-dmg", nargs="?", const="", metavar="basename", - help="create a .dmg disk image; if basename is not specified, a camel-cased version of the app name is used") + help=( + "create a .dmg disk image; if basename is not specified, a camel-cased version" + " of the app name is used" + ), +) ap.add_argument( "-fancy", nargs=1, metavar="plist", default=[], - help="make a fancy looking disk image using the given plist file with instructions; requires -dmg to work") + help=( + "make a fancy looking disk image using the given plist file with instructions;" + " requires -dmg to work" + ), +) ap.add_argument( "-add-qt-tr", nargs=1, metavar="languages", default=[], - help="add Qt translation files to the bundle's resources; the language list must be separated with commas, not with whitespace") + help=( + "add Qt translation files to the bundle's resources; the language list must be" + " separated with commas, not with whitespace" + ), +) ap.add_argument( "-translations-dir", nargs=1, metavar="path", default=None, - help="Path to Qt's translation files") + help="Path to Qt's translation files", +) ap.add_argument( "-add-resources", nargs="+", metavar="path", default=[], - help="list of additional files or folders to be copied into the bundle's resources; must be the last argument") + help=( + "list of additional files or folders to be copied into the bundle's resources;" + " must be the last argument" + ), +) ap.add_argument( "-volname", nargs=1, metavar="volname", default=[], - help="custom volume name for dmg") + help="custom volume name for dmg", +) config = ap.parse_args() @@ -687,8 +742,7 @@ if not os.path.exists(app_bundle): if verbose >= 1: - sys.stderr.write( - f"Error: Could not find app bundle \"{app_bundle}\"\n") + sys.stderr.write(f'Error: Could not find app bundle "{app_bundle}"\n') sys.exit(1) app_bundle_name = os.path.splitext(os.path.basename(app_bundle))[0] @@ -701,17 +755,17 @@ else: if verbose >= 1: sys.stderr.write( - f"Error: Could not find translation dir 
\"{translations_dir}\"\n") + f'Error: Could not find translation dir "{translations_dir}"\n' + ) sys.exit(1) # ------------------------------------------------ for p in config.add_resources: if verbose >= 3: - print(f"Checking for \"{p}\"...") + print(f'Checking for "{p}"...') if not os.path.exists(p): if verbose >= 1: - sys.stderr.write( - f"Error: Could not find additional resource file \"{p}\"\n") + sys.stderr.write(f'Error: Could not find additional resource file "{p}"\n') sys.exit(1) # ------------------------------------------------ @@ -724,16 +778,17 @@ except ImportError: if verbose >= 1: sys.stderr.write( - "Error: Could not import plistlib which is required for fancy disk images.\n") + "Error: Could not import plistlib which is required for fancy disk" + " images.\n" + ) sys.exit(1) p = config.fancy[0] if verbose >= 3: - print(f"Fancy: Loading \"{p}\"...") + print(f'Fancy: Loading "{p}"...') if not os.path.exists(p): if verbose >= 1: - sys.stderr.write( - f"Error: Could not find fancy disk image plist at \"{p}\"\n") + sys.stderr.write(f'Error: Could not find fancy disk image plist at "{p}"\n') sys.exit(1) try: @@ -741,47 +796,48 @@ except BaseException: if verbose >= 1: sys.stderr.write( - f"Error: Could not parse fancy disk image plist at \"{p}\"\n") + f'Error: Could not parse fancy disk image plist at "{p}"\n' + ) sys.exit(1) try: assert "window_bounds" not in fancy or ( - isinstance( - fancy["window_bounds"], - list) and len( - fancy["window_bounds"]) == 4) + isinstance(fancy["window_bounds"], list) + and len(fancy["window_bounds"]) == 4 + ) assert "background_picture" not in fancy or isinstance( - fancy["background_picture"], str) + fancy["background_picture"], str + ) assert "icon_size" not in fancy or isinstance(fancy["icon_size"], int) assert "applications_symlink" not in fancy or isinstance( - fancy["applications_symlink"], bool) + fancy["applications_symlink"], bool + ) if "items_position" in fancy: assert isinstance(fancy["items_position"], dict) for key, value in fancy["items_position"].items(): - assert isinstance( - value, - list) and len(value) == 2 and isinstance( - value[0], - int) and isinstance( - value[1], - int) + assert ( + isinstance(value, list) + and len(value) == 2 + and isinstance(value[0], int) + and isinstance(value[1], int) + ) except BaseException: if verbose >= 1: - sys.stderr.write( - f"Error: Bad format of fancy disk image plist at \"{p}\"\n") + sys.stderr.write(f'Error: Bad format of fancy disk image plist at "{p}"\n') sys.exit(1) if "background_picture" in fancy: bp = fancy["background_picture"] if verbose >= 3: - print(f"Fancy: Resolving background picture \"{bp}\"...") + print(f'Fancy: Resolving background picture "{bp}"...') if not os.path.exists(bp): bp = os.path.join(os.path.dirname(p), bp) if not os.path.exists(bp): if verbose >= 1: sys.stderr.write( - "Error: Could not find background picture at \"{}\" or \"{}\"\n".format( - fancy["background_picture"], bp)) + 'Error: Could not find background picture at "{}" or "{}"\n' + .format(fancy["background_picture"], bp) + ) sys.exit(1) else: fancy["background_picture"] = bp @@ -824,13 +880,15 @@ try: deploymentInfo = deployFrameworksForAppBundle( - applicationBundle, config.strip, verbose) + applicationBundle, config.strip, verbose + ) if deploymentInfo.qtPath is None: deploymentInfo.qtPath = os.getenv("QTDIR", None) if deploymentInfo.qtPath is None: if verbose >= 1: sys.stderr.write( - "Warning: Could not detect Qt's path, skipping plugin deployment!\n") + "Warning: Could not detect Qt's path, 
skipping plugin deployment!\n" + ) config.plugins = False except RuntimeError as e: if verbose >= 1: @@ -863,16 +921,16 @@ else: sys.stderr.write("Error: Could not find Qt translation path\n") sys.exit(1) - add_qt_tr = [f"qt_{lng}.qm" - for lng in config.add_qt_tr[0].split(",")] + add_qt_tr = [f"qt_{lng}.qm" for lng in config.add_qt_tr[0].split(",")] for lng_file in add_qt_tr: p = os.path.join(qt_tr_dir, lng_file) if verbose >= 3: - print(f"Checking for \"{p}\"...") + print(f'Checking for "{p}"...') if not os.path.exists(p): if verbose >= 1: sys.stderr.write( - f"Error: Could not find Qt translation file \"{lng_file}\"\n") + f'Error: Could not find Qt translation file "{lng_file}"\n' + ) sys.exit(1) # ------------------------------------------------ @@ -891,17 +949,14 @@ for lng_file in add_qt_tr: if verbose >= 3: print( - os.path.join( - qt_tr_dir, - lng_file), + os.path.join(qt_tr_dir, lng_file), "->", - os.path.join( - applicationBundle.resourcesPath, - lng_file)) + os.path.join(applicationBundle.resourcesPath, lng_file), + ) shutil.copy2( - os.path.join( - qt_tr_dir, lng_file), os.path.join( - applicationBundle.resourcesPath, lng_file)) + os.path.join(qt_tr_dir, lng_file), + os.path.join(applicationBundle.resourcesPath, lng_file), + ) # ------------------------------------------------ @@ -919,13 +974,14 @@ # ------------------------------------------------ -if config.sign and 'CODESIGNARGS' not in os.environ: +if config.sign and "CODESIGNARGS" not in os.environ: print("You must set the CODESIGNARGS environment variable. Skipping signing.") elif config.sign: if verbose >= 1: print(f"Code-signing app bundle {target}") subprocess.check_call( - f"codesign --force {os.environ['CODESIGNARGS']} {target}", shell=True) + f"codesign --force {os.environ['CODESIGNARGS']} {target}", shell=True + ) # ------------------------------------------------ @@ -970,12 +1026,13 @@ srcfolder="dist", format="UDBZ", volname=volname, - ov=True) + ov=True, + ) except subprocess.CalledProcessError as e: sys.exit(e.returncode) else: if verbose >= 3: - print("Determining size of \"dist\"...") + print('Determining size of "dist"...') size = 0 for path, dirs, files in os.walk("dist"): for file in files: @@ -992,7 +1049,8 @@ format="UDRW", size=size, volname=volname, - ov=True) + ov=True, + ) except subprocess.CalledProcessError as e: sys.exit(e.returncode) @@ -1005,7 +1063,8 @@ readwrite=True, noverify=True, noautoopen=True, - capture_stdout=True) + capture_stdout=True, + ) except subprocess.CalledProcessError as e: sys.exit(e.returncode) @@ -1018,8 +1077,8 @@ if "background_picture" in fancy: bg_path = os.path.join( - disk_root, ".background", os.path.basename( - fancy["background_picture"])) + disk_root, ".background", os.path.basename(fancy["background_picture"]) + ) os.mkdir(os.path.dirname(bg_path)) if verbose >= 3: print(fancy["background_picture"], "->", bg_path) @@ -1028,11 +1087,7 @@ bg_path = None if fancy.get("applications_symlink", False): - os.symlink( - "/Applications", - os.path.join( - disk_root, - "Applications")) + os.symlink("/Applications", os.path.join(disk_root, "Applications")) # The Python appscript package broke with OSX 10.8 and isn't being fixed. 
# So we now build up an AppleScript string and use the osascript command @@ -1062,12 +1117,15 @@ """) itemscript = Template( - 'set position of item "${item}" of container window to {${position}}') + 'set position of item "${item}" of container window to {${position}}' + ) items_positions = [] if "items_position" in fancy: for name, position in fancy["items_position"].items(): - params = {"item": name, "position": ",".join( - [str(p) for p in position])} + params = { + "item": name, + "position": ",".join([str(p) for p in position]), + } items_positions.append(itemscript.substitute(params)) params = { @@ -1075,28 +1133,30 @@ "window_bounds": "300,300,800,620", "icon_size": "96", "background_commands": "", - "items_positions": "\n ".join(items_positions) + "items_positions": "\n ".join(items_positions), } if "window_bounds" in fancy: - params["window_bounds"] = ",".join( - [str(p) for p in fancy["window_bounds"]]) + params["window_bounds"] = ",".join([str(p) for p in fancy["window_bounds"]]) if "icon_size" in fancy: params["icon_size"] = str(fancy["icon_size"]) if bg_path is not None: # Set background file, then call SetFile to make it invisible. # (note: making it invisible first makes set background picture fail) - bgscript = Template("""set background picture of theViewOptions to file ".background:$bgpic" - do shell script "SetFile -a V /Volumes/$disk/.background/$bgpic" """) + bgscript = Template( + """set background picture of theViewOptions to file ".background:$bgpic" + do shell script "SetFile -a V /Volumes/$disk/.background/$bgpic" """ + ) params["background_commands"] = bgscript.substitute( - {"bgpic": os.path.basename(bg_path), "disk": params["disk"]}) + {"bgpic": os.path.basename(bg_path), "disk": params["disk"]} + ) s = appscript.substitute(params) if verbose >= 2: print("Running AppleScript:") print(s) - p = subprocess.Popen(['osascript', '-'], stdin=subprocess.PIPE) - p.communicate(input=s.encode('utf-8')) + p = subprocess.Popen(["osascript", "-"], stdin=subprocess.PIPE) + p.communicate(input=s.encode("utf-8")) if p.returncode: print("Error running osascript.") @@ -1110,7 +1170,8 @@ dmg_name + ".temp", format="UDBZ", o=dmg_name + ".dmg", - ov=True) + ov=True, + ) except subprocess.CalledProcessError as e: sys.exit(e.returncode) diff --git a/contrib/message-capture/message-capture-parser.py b/contrib/message-capture/message-capture-parser.py --- a/contrib/message-capture/message-capture-parser.py +++ b/contrib/message-capture/message-capture-parser.py @@ -15,10 +15,7 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Union -sys.path.append( - os.path.join( - os.path.dirname(__file__), - '../../test/functional')) +sys.path.append(os.path.join(os.path.dirname(__file__), "../../test/functional")) from test_framework.messages import ser_uint256 # noqa: E402 from test_framework.p2p import MESSAGEMAP # noqa: E402 @@ -61,7 +58,7 @@ class ProgressBar: def __init__(self, total: float): self.total = total - self.running = 0. 
+ self.running = 0.0 def set_progress(self, progress: float): cols = shutil.get_terminal_size()[0] @@ -69,11 +66,12 @@ return max_blocks = cols - 9 num_blocks = int(max_blocks * progress) - print('\r[ {}{} ] {:3.0f}%' - .format('#' * num_blocks, - ' ' * (max_blocks - num_blocks), - progress * 100), - end='') + print( + "\r[ {}{} ] {:3.0f}%".format( + "#" * num_blocks, " " * (max_blocks - num_blocks), progress * 100 + ), + end="", + ) def update(self, more: float): self.running += more @@ -89,7 +87,11 @@ val = getattr(obj, slot, None) if slot in HASH_INTS and isinstance(val, int): ret[slot] = ser_uint256(val).hex() - elif slot in HASH_INT_VECTORS and isinstance(val, list) and isinstance(val[0], int): + elif ( + slot in HASH_INT_VECTORS + and isinstance(val, list) + and isinstance(val[0], int) + ): ret[slot] = [ser_uint256(a).hex() for a in val] else: ret[slot] = to_jsonable(val) @@ -102,9 +104,10 @@ return obj -def process_file(path: str, messages: List[Any], recv: bool, - progress_bar: Optional[ProgressBar]) -> None: - with open(path, 'rb') as f_in: +def process_file( + path: str, messages: List[Any], recv: bool, progress_bar: Optional[ProgressBar] +) -> None: + with open(path, "rb") as f_in: if progress_bar: bytes_read = 0 @@ -121,7 +124,7 @@ break tmp_header = BytesIO(tmp_header_raw) time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") - msgtype: bytes = tmp_header.read(MSGTYPE_SIZE).split(b'\x00', 1)[0] + msgtype: bytes = tmp_header.read(MSGTYPE_SIZE).split(b"\x00", 1)[0] length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # Start converting the message to a dictionary @@ -148,7 +151,8 @@ messages.append(msg_dict) print( f"WARNING - Unrecognized message type {msgtype!r} in {path}", - file=sys.stderr) + file=sys.stderr, + ) continue # Deserialize the message @@ -167,7 +171,8 @@ messages.append(msg_dict) print( f"WARNING - Unable to deserialize message in {path}", - file=sys.stderr) + file=sys.stderr, + ) continue # Convert body of message into a jsonable object @@ -187,23 +192,26 @@ def main(): parser = argparse.ArgumentParser( description=__doc__, - epilog="EXAMPLE \n\t{0} -o out.json /message_capture/**/*.dat".format( - sys.argv[0]), - formatter_class=argparse.RawTextHelpFormatter) + epilog=( + f"EXAMPLE \n\t{sys.argv[0]} -o out.json /message_capture/**/*.dat" + ), + formatter_class=argparse.RawTextHelpFormatter, + ) parser.add_argument( - "capturepaths", - nargs='+', - help="binary message capture files to parse.") + "capturepaths", nargs="+", help="binary message capture files to parse." + ) + parser.add_argument("-o", "--output", help="output file. If unset print to stdout") parser.add_argument( - "-o", "--output", - help="output file. If unset print to stdout") - parser.add_argument( - "-n", "--no-progress-bar", - action='store_true', - help="disable the progress bar. Automatically set if the output is not a terminal") + "-n", + "--no-progress-bar", + action="store_true", + help=( + "disable the progress bar. 
Automatically set if the output is not a"
+            " terminal"
+        ),
+    )
     args = parser.parse_args()
 
-    capturepaths = [Path.cwd() / Path(capturepath)
-                    for capturepath in args.capturepaths]
+    capturepaths = [Path.cwd() / Path(capturepath) for capturepath in args.capturepaths]
     output = Path.cwd() / Path(args.output) if args.output else False
     use_progress_bar = (not args.no_progress_bar) and sys.stdout.isatty()
 
@@ -215,19 +223,15 @@
         progress_bar = None
 
     for capture in capturepaths:
-        process_file(
-            str(capture),
-            messages,
-            "recv" in capture.stem,
-            progress_bar)
+        process_file(str(capture), messages, "recv" in capture.stem, progress_bar)
 
-    messages.sort(key=lambda msg: msg['time'])
+    messages.sort(key=lambda msg: msg["time"])
 
     if use_progress_bar:
         progress_bar.set_progress(1)
 
     jsonrep = json.dumps(messages)
     if output:
-        with open(str(output), 'w+', encoding="utf8") as f_out:
+        with open(str(output), "w+", encoding="utf8") as f_out:
             f_out.write(jsonrep)
     else:
         print(jsonrep)
diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py
--- a/contrib/seeds/makeseeds.py
+++ b/contrib/seeds/makeseeds.py
@@ -16,17 +16,15 @@
 NSEEDS = 512
 
 MAX_SEEDS_PER_ASN = {
-    'ipv4': 6,
-    'ipv6': 10,
+    "ipv4": 6,
+    "ipv6": 10,
 }
 
 MIN_BLOCKS = 760000
 
-PATTERN_IPV4 = re.compile(
-    r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
+PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
 PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
-PATTERN_ONION = re.compile(
-    r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
+PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
 
 # Used to only select nodes with a user agent string compatible with the
 # eCash network.
@@ -34,7 +32,7 @@
 
 
 def parseline(line: str) -> Union[dict, None]:
-    """ Parses a line from `seeds_main.txt` into a dictionary of details for that line,
+    """Parses a line from `seeds_main.txt` into a dictionary of details for that line,
     or `None` if the line could not be parsed.
     """
     sline = line.split()
@@ -45,7 +43,7 @@
     # The user agent is at the end of the line. It may contain space, so we
     # concatenate.
     for i in range(12, len(sline)):
-        sline[11] += ' ' + sline[i]
+        sline[11] += " " + sline[i]
 
     # Remove leftovers
     del sline[12:]
@@ -60,13 +58,13 @@
         if m is None:
             return None
         else:
-            net = 'onion'
+            net = "onion"
             ipstr = sortkey = m.group(1)
             port = int(m.group(2))
     else:
-        net = 'ipv6'
+        net = "ipv6"
         # Not interested in localhost
-        if m.group(1) in ['::']:
+        if m.group(1) in ["::"]:
             return None
         ipstr = m.group(1)
         # XXX parse IPv6 into number, could use name_to_ipv6 from
@@ -82,7 +80,7 @@
             ip = ip + (int(m.group(i + 2)) << (8 * (3 - i)))
         if ip == 0:
             return None
-        net = 'ipv4'
+        net = "ipv4"
         sortkey = ip
         ipstr = m.group(1)
         port = int(m.group(6))
@@ -103,80 +101,88 @@
     blocks = int(sline[8])
     # Construct result.
     return {
-        'net': net,
-        'ip': ipstr,
-        'port': port,
-        'ipnum': ip,
-        'uptime': uptime30,
-        'lastsuccess': lastsuccess,
-        'version': version,
-        'agent': agent,
-        'service': service,
-        'blocks': blocks,
-        'sortkey': sortkey,
+        "net": net,
+        "ip": ipstr,
+        "port": port,
+        "ipnum": ip,
+        "uptime": uptime30,
+        "lastsuccess": lastsuccess,
+        "version": version,
+        "agent": agent,
+        "service": service,
+        "blocks": blocks,
+        "sortkey": sortkey,
     }
 
 
 def dedup(ips: List[Dict]) -> List[Dict]:
-    """ Remove duplicates from `ips` where multiple ips share address and port.
-    """
""" + """Remove duplicates from `ips` where multiple ips share address and port.""" d = {} for ip in ips: - d[ip['ip'], ip['port']] = ip + d[ip["ip"], ip["port"]] = ip return list(d.values()) def filtermultiport(ips: List[Dict]) -> List[Dict]: - """ Filter out hosts with more nodes per IP""" + """Filter out hosts with more nodes per IP""" hist = collections.defaultdict(list) for ip in ips: - hist[ip['sortkey']].append(ip) + hist[ip["sortkey"]].append(ip) return [value[0] for (key, value) in list(hist.items()) if len(value) == 1] def lookup_asn(net: str, ip: str) -> Union[int, None]: - """ Look up the asn for an `ip` address by querying cymru.com + """Look up the asn for an `ip` address by querying cymru.com on network `net` (e.g. ipv4 or ipv6). Returns in integer ASN or None if it could not be found. """ try: - if net == 'ipv4': + if net == "ipv4": ipaddr = ip - prefix = '.origin' + prefix = ".origin" else: # http://www.team-cymru.com/IP-ASN-mapping.html # 2001:4860:b002:23::68 res = str() # pick the first 4 nibbles - for nb in ip.split(':')[:4]: + for nb in ip.split(":")[:4]: # right padded with '0' for c in nb.zfill(4): # 2001 4860 b002 0023 - res += c + '.' + res += c + "." # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3 - ipaddr = res.rstrip('.') - prefix = '.origin6' - - asn = int([x.to_text() for x in dns.resolver.query('.'.join( - reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com', - 'TXT').response.answer][0].split('\"')[1].split(' ')[0]) + ipaddr = res.rstrip(".") + prefix = ".origin6" + + asn = int( + [ + x.to_text() + for x in dns.resolver.query( + ".".join(reversed(ipaddr.split("."))) + prefix + ".asn.cymru.com", + "TXT", + ).response.answer + ][0] + .split('"')[1] + .split(" ")[0] + ) return asn except Exception: sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n') return None + # Based on Greg Maxwell's seed_filter.py -def filterbyasn(ips: List[Dict], max_per_asn: Dict, - max_per_net: int) -> List[Dict]: - """ Prunes `ips` by +def filterbyasn(ips: List[Dict], max_per_asn: Dict, max_per_net: int) -> List[Dict]: + """Prunes `ips` by (a) trimming ips to have at most `max_per_net` ips from each net (e.g. ipv4, ipv6); and (b) trimming ips to have at most `max_per_asn` ips from each asn in each net. """ # Sift out ips by type - ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']] - ips_onion = [ip for ip in ips if ip['net'] == 'onion'] + ips_ipv46 = [ip for ip in ips if ip["net"] in ["ipv4", "ipv6"]] + ips_onion = [ip for ip in ips if ip["net"] == "onion"] # Filter IPv46 by ASN, and limit to max_per_net per network result = [] @@ -189,20 +195,21 @@ print( f"{i:6d}/{len(ips_ipv46)} [{100*i/len(ips_ipv46):04.1f}%]\r", file=sys.stderr, - end='', - flush=True) + end="", + flush=True, + ) - if net_count[ip['net']] == max_per_net: + if net_count[ip["net"]] == max_per_net: # do not add this ip as we already too many # ips from this network continue - asn = lookup_asn(ip['net'], ip['ip']) - if asn is None or asn_count[asn] == max_per_asn[ip['net']]: + asn = lookup_asn(ip["net"], ip["ip"]) + if asn is None or asn_count[asn] == max_per_asn[ip["net"]]: # do not add this ip as we already have too many # ips from this ASN on this network continue asn_count[asn] += 1 - net_count[ip['net']] += 1 + net_count[ip["net"]] += 1 result.append(ip) # Add back Onions (up to max_per_net) @@ -211,11 +218,11 @@ def ip_stats(ips: List[Dict]) -> str: - """ Format and return pretty string from `ips`. 
""" + """Format and return pretty string from `ips`.""" hist: Dict[str, int] = collections.defaultdict(int) for ip in ips: if ip is not None: - hist[ip['net']] += 1 + hist[ip["net"]] += 1 return f"{hist['ipv4']:6d} {hist['ipv6']:6d} {hist['onion']:6d}" @@ -225,59 +232,58 @@ ips = [parseline(line) for line in lines] print( - '\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', - file=sys.stderr) - print(f'{ip_stats(ips):s} Initial', file=sys.stderr) + ( + "\x1b[7m IPv4 IPv6 Onion Pass " + " \x1b[0m" + ), + file=sys.stderr, + ) + print(f"{ip_stats(ips):s} Initial", file=sys.stderr) # Skip entries with invalid address. ips = [ip for ip in ips if ip is not None] - print( - f'{ip_stats(ips):s} Skip entries with invalid address', - file=sys.stderr) + print(f"{ip_stats(ips):s} Skip entries with invalid address", file=sys.stderr) # Skip duplicates (in case multiple seeds files were concatenated) ips = dedup(ips) - print(f'{ip_stats(ips):s} After removing duplicates', file=sys.stderr) + print(f"{ip_stats(ips):s} After removing duplicates", file=sys.stderr) # Enforce minimal number of blocks. - ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS] - print( - f'{ip_stats(ips):s} Enforce minimal number of blocks', - file=sys.stderr) + ips = [ip for ip in ips if ip["blocks"] >= MIN_BLOCKS] + print(f"{ip_stats(ips):s} Enforce minimal number of blocks", file=sys.stderr) # Require service bit 1. - ips = [ip for ip in ips if (ip['service'] & 1) == 1] - print(f'{ip_stats(ips):s} Require service bit 1', file=sys.stderr) + ips = [ip for ip in ips if (ip["service"] & 1) == 1] + print(f"{ip_stats(ips):s} Require service bit 1", file=sys.stderr) # Require at least 50% 30-day uptime for clearnet, 10% for onion. req_uptime = { - 'ipv4': 50, - 'ipv6': 50, - 'onion': 10, + "ipv4": 50, + "ipv6": 50, + "onion": 10, } - ips = [ip for ip in ips if ip['uptime'] > req_uptime[ip['net']]] - print(f'{ip_stats(ips):s} Require minimum uptime', file=sys.stderr) + ips = [ip for ip in ips if ip["uptime"] > req_uptime[ip["net"]]] + print(f"{ip_stats(ips):s} Require minimum uptime", file=sys.stderr) # Require a known and recent user agent. - ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])] - print( - f'{ip_stats(ips):s} Require a known and recent user agent', - file=sys.stderr) + ips = [ip for ip in ips if PATTERN_AGENT.match(ip["agent"])] + print(f"{ip_stats(ips):s} Require a known and recent user agent", file=sys.stderr) # Sort by availability (and use last success as tie breaker) - ips.sort(key=lambda x: - (x['uptime'], x['lastsuccess'], x['ip']), reverse=True) + ips.sort(key=lambda x: (x["uptime"], x["lastsuccess"], x["ip"]), reverse=True) # Filter out hosts with multiple bitcoin ports, these are likely abusive ips = filtermultiport(ips) print( - f'{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports', - file=sys.stderr) + f"{ip_stats(ips):s} Filter out hosts with multiple bitcoin ports", + file=sys.stderr, + ) # Look up ASNs and limit results, both per ASN and globally. ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS) print( - f'{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net', - file=sys.stderr) + f"{ip_stats(ips):s} Look up ASNs and limit results per ASN and per net", + file=sys.stderr, + ) # Sort the results by IP address (for deterministic output). 
- ips.sort(key=lambda x: (x['net'], x['sortkey'])) + ips.sort(key=lambda x: (x["net"], x["sortkey"])) for ip in ips: - if ip['net'] == 'ipv6': + if ip["net"] == "ipv6": print(f"[{ip['ip']}]:{ip['port']}") else: print(f"{ip['ip']}:{ip['port']}") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/contrib/teamcity/build-configurations.py b/contrib/teamcity/build-configurations.py --- a/contrib/teamcity/build-configurations.py +++ b/contrib/teamcity/build-configurations.py @@ -42,19 +42,19 @@ self.project_root = PurePath( subprocess.run( - ['git', 'rev-parse', '--show-toplevel'], + ["git", "rev-parse", "--show-toplevel"], capture_output=True, check=True, - encoding='utf-8', + encoding="utf-8", text=True, ).stdout.strip() ) self.project_commit = subprocess.run( - ['git', 'rev-parse', '--short', 'HEAD'], + ["git", "rev-parse", "--short", "HEAD"], capture_output=True, check=True, - encoding='utf-8', + encoding="utf-8", text=True, ).stdout.strip() @@ -77,9 +77,8 @@ # it should not be empty. if not config.get("builds", None): raise AssertionError( - "Invalid configuration file {}: the \"builds\" element is missing or empty".format( - str(self.config_file) - ) + 'Invalid configuration file {}: the "builds" element is missing or' + " empty".format(str(self.config_file)) ) # Check the target build has an entry in the configuration file @@ -103,10 +102,8 @@ # Raise an error if the template does not exist if template_name not in templates: raise AssertionError( - "Build {} configuration inherits from template {}, but the template does not exist.".format( - self.name, - template_name - ) + "Build {} configuration inherits from template {}, but the template" + " does not exist.".format(self.name, template_name) ) always_merger.merge(template_config, templates.get(template_name)) @@ -114,15 +111,15 @@ # Create the build directory as needed self.build_directory = Path( - self.project_root.joinpath( - 'abc-ci-builds', - self.name)) + self.project_root.joinpath("abc-ci-builds", self.name) + ) # Define the junit and logs directories self.junit_reports_dir = self.build_directory.joinpath("test/junit") self.test_logs_dir = self.build_directory.joinpath("test/log") self.functional_test_logs = self.build_directory.joinpath( - "test/tmp/test_runner_*") + "test/tmp/test_runner_*" + ) # We will provide the required environment variables self.environment_variables = { @@ -134,12 +131,14 @@ def create_script_file(self, dest, content): # Write the content to a script file using a template - with open(self.script_root.joinpath("bash_script.sh.in"), encoding='utf-8') as f: + with open( + self.script_root.joinpath("bash_script.sh.in"), encoding="utf-8" + ) as f: script_template_content = f.read() template = Template(script_template_content) - with open(dest, 'w', encoding='utf-8') as f: + with open(dest, "w", encoding="utf-8") as f: f.write( template.safe_substitute( **self.environment_variables, @@ -176,7 +175,8 @@ context = docker_config.get("context", None) if context is None: raise AssertionError( - f"The docker configuration for build {self.name} is missing a context, aborting" + f"The docker configuration for build {self.name} is missing a" + " context, aborting" ) # Make the context path absolute context = self.project_root.joinpath(context) @@ -188,8 +188,11 @@ ) dockerfile = docker_config.get("dockerfile", None) - dockerfile_args = [ - "-f", str(self.project_root.joinpath(dockerfile))] if dockerfile else [] + dockerfile_args = ( + ["-f", str(self.project_root.joinpath(dockerfile))] + if 
dockerfile
+            else []
+        )
 
         tag_name = "-".join([self.name, self.project_commit])
 
@@ -197,7 +200,9 @@
         self.build_steps.append(
             {
                 "bin": "docker",
-                "args": ["build"] + dockerfile_args + ["-t", tag_name, str(context)],
+                "args": (
+                    ["build"] + dockerfile_args + ["-t", tag_name, str(context)]
+                ),
             }
         )
 
@@ -211,15 +216,32 @@
         self.build_steps.append(
             {
                 "bin": "docker",
-                "args": ["run", "--rm", "-d", "--name", tag_name, "--stop-signal", "SIGTERM", "--stop-timeout", "60"] + port_args + [tag_name],
+                "args": (
+                    [
+                        "run",
+                        "--rm",
+                        "-d",
+                        "--name",
+                        tag_name,
+                        "--stop-signal",
+                        "SIGTERM",
+                        "--stop-timeout",
+                        "60",
+                    ]
+                    + port_args
+                    + [tag_name]
+                ),
             }
         )
 
         timeout_minutes = docker_config.get("timeout_minutes", 60)
 
         # Write the address to stdout and to the preview_url log file
-        preview_msg = f"Preview is available at http://{ip_address}:{outer_port} for the next {timeout_minutes} minutes."
-        with open(preview_url, 'w', encoding='utf-8') as f:
+        preview_msg = (
+            f"Preview is available at http://{ip_address}:{outer_port} for the next"
+            f" {timeout_minutes} minutes."
+        )
+        with open(preview_url, "w", encoding="utf-8") as f:
             f.write(preview_msg)
         self.build_steps.append(
             {
@@ -245,7 +267,11 @@
         script_file = self.build_directory.joinpath("docker_timeout.sh")
         self.create_script_file(
             script_file,
-            f'cd "${{HOME}}" && echo "docker stop {tag_name}" | at now +{timeout_minutes} minutes')
+            (
+                f'cd "${{HOME}}" && echo "docker stop {tag_name}" | at now'
+                f" +{timeout_minutes} minutes"
+            ),
+        )
 
         self.build_steps.append(
             {
@@ -263,31 +289,38 @@
         targets = self.config.get("targets", None)
         if not targets:
             raise AssertionError(
-                "No build target has been provided for build {} and no script is defined, aborting".format(
-                    self.name
-                )
+                "No build target has been provided for build {} and no script is"
+                " defined, aborting".format(self.name)
             )
 
         # Some more flags for the build_cmake.sh script
         if self.config.get("clang", False):
-            self.cmake_flags.extend([
-                "-DCMAKE_C_COMPILER=clang",
-                "-DCMAKE_CXX_COMPILER=clang++",
-            ])
+            self.cmake_flags.extend(
+                [
+                    "-DCMAKE_C_COMPILER=clang",
+                    "-DCMAKE_CXX_COMPILER=clang++",
+                ]
+            )
         if self.config.get("gcc", False):
-            self.cmake_flags.extend([
-                "-DCMAKE_C_COMPILER=gcc",
-                "-DCMAKE_CXX_COMPILER=g++",
-            ])
+            self.cmake_flags.extend(
+                [
+                    "-DCMAKE_C_COMPILER=gcc",
+                    "-DCMAKE_CXX_COMPILER=g++",
+                ]
+            )
        if self.config.get("junit", True):
-            self.cmake_flags.extend([
-                "-DENABLE_JUNIT_REPORT=ON",
-            ])
+            self.cmake_flags.extend(
+                [
+                    "-DENABLE_JUNIT_REPORT=ON",
+                ]
+            )
         if self.config.get("Werror", False):
-            self.cmake_flags.extend([
-                "-DCMAKE_C_FLAGS=-Werror",
-                "-DCMAKE_CXX_FLAGS=-Werror",
-            ])
+            self.cmake_flags.extend(
+                [
+                    "-DCMAKE_C_FLAGS=-Werror",
+                    "-DCMAKE_CXX_FLAGS=-Werror",
+                ]
+            )
 
         # Get the generator, default to ninja
         generator = self.config.get("generator", {})
@@ -295,11 +328,10 @@
         generator_command = generator.get("command", "ninja")
 
         # If the build runs on diff or has the fail_fast flag, exit on first error.
         # Otherwise keep running so we can gather more test results.
-        fail_fast = self.config.get(
-            "fail_fast", False) or self.config.get(
-            "runOnDiff", False)
-        generator_flags = generator.get(
-            "flags", ["-k0"] if not fail_fast else [])
+        fail_fast = self.config.get("fail_fast", False) or self.config.get(
+            "runOnDiff", False
+        )
+        generator_flags = generator.get("flags", ["-k0"] if not fail_fast else [])
 
         # Max out the jobs by default when the generator uses make
         if generator_command == "make":
@@ -315,14 +347,18 @@
         # Both static_depends and toolchain are mandatory for cross builds
         if not static_depends:
             raise AssertionError(
-                "`static_depends` configuration is required for cross builds")
+                "`static_depends` configuration is required for cross builds"
+            )
         if not toolchain:
             raise AssertionError(
-                "`toolchain` configuration is required for cross builds")
+                "`toolchain` configuration is required for cross builds"
+            )
 
         self.build_steps.append(
             {
-                "bin": str(self.project_root.joinpath("contrib/devtools/build_depends.sh")),
+                "bin": str(
+                    self.project_root.joinpath("contrib/devtools/build_depends.sh")
+                ),
                 "args": [static_depends],
             }
         )
@@ -330,9 +366,7 @@
         toolchain_file = self.project_root.joinpath(
             f"cmake/platforms/{toolchain}.cmake"
         )
-        self.cmake_flags.append(
-            f"-DCMAKE_TOOLCHAIN_FILE={str(toolchain_file)}"
-        )
+        self.cmake_flags.append(f"-DCMAKE_TOOLCHAIN_FILE={str(toolchain_file)}")
 
         if emulator:
             self.cmake_flags.append(
@@ -343,7 +377,11 @@
         self.build_steps.append(
             {
                 "bin": "cmake",
-                "args": ["-G", generator_name, str(self.project_root)] + self.cmake_flags,
+                "args": [
+                    "-G",
+                    generator_name,
+                    str(self.project_root),
+                ] + self.cmake_flags,
             }
         )
 
@@ -372,7 +410,7 @@
         return self.config.get(key, default)
 
 
-class UserBuild():
+class UserBuild:
     def __init__(self, configuration):
         self.configuration = configuration
 
@@ -385,8 +423,7 @@
         # - the clean log will contain the same filtered content as what is
         # printed to stdout. This filter is done in print_line_to_logs().
         self.logs = {}
-        self.logs["clean_log"] = build_directory.joinpath(
-            "build.clean.log")
+        self.logs["clean_log"] = build_directory.joinpath("build.clean.log")
         self.logs["full_log"] = build_directory.joinpath("build.full.log")
 
         # Clean the build directory before any build step is run.
@@ -395,7 +432,7 @@
         self.configuration.build_directory.mkdir(exist_ok=True, parents=True)
 
         self.preview_url = build_directory.joinpath("preview_url.log")
-        self.ip_address = '127.0.0.1'
+        self.ip_address = "127.0.0.1"
 
     def copy_artifacts(self, artifacts):
         # Make sure the artifact directory always exists. It is created before
@@ -412,8 +449,11 @@
         # from it needs to be excluded from the glob matches to prevent infinite
         # recursion.
for pattern, dest in artifacts.items(): - matches = [m for m in sorted(self.configuration.build_directory.glob( - pattern)) if self.artifact_dir not in m.parents and self.artifact_dir != m] + matches = [ + m + for m in sorted(self.configuration.build_directory.glob(pattern)) + if self.artifact_dir not in m.parents and self.artifact_dir != m + ] dest = self.artifact_dir.joinpath(dest) # Pattern did not match @@ -443,12 +483,12 @@ def print_line_to_logs(self, line): # Always print to the full log - with open(self.logs["full_log"], 'a', encoding='utf-8') as log: + with open(self.logs["full_log"], "a", encoding="utf-8") as log: log.write(line) # Discard the set -x bash output for stdout and the clean log if not line.startswith("+"): - with open(self.logs["clean_log"], 'a', encoding='utf-8') as log: + with open(self.logs["clean_log"], "a", encoding="utf-8") as log: log.write(line) print(line.rstrip()) @@ -456,7 +496,7 @@ while True: try: line = await stdout.readline() - line = line.decode('utf-8') + line = line.decode("utf-8") if not line: break @@ -501,7 +541,9 @@ await asyncio.wait_for(logging_task, timeout=5) except asyncio.TimeoutError: self.print_line_to_logs( - "Warning: Timed out while waiting for logging to flush. Some log lines may be missing.") + "Warning: Timed out while waiting for logging to flush. Some log lines" + " may be missing." + ) return result @@ -510,11 +552,12 @@ message = f"Build {self.configuration.name} completed successfully" try: for step in self.configuration.build_steps: - return_code = await asyncio.wait_for(self.run_build(step["bin"], step["args"]), timeout) + return_code = await asyncio.wait_for( + self.run_build(step["bin"], step["args"]), timeout + ) if return_code != 0: message = "Build {} failed with exit code {}".format( - self.configuration.name, - return_code + self.configuration.name, return_code ) return @@ -534,9 +577,13 @@ **self.configuration.get("artifacts", {}), str(self.logs["full_log"].relative_to(build_directory)): "", str(self.logs["clean_log"].relative_to(build_directory)): "", - str(self.configuration.junit_reports_dir.relative_to(build_directory)): "", + str( + self.configuration.junit_reports_dir.relative_to(build_directory) + ): "", str(self.configuration.test_logs_dir.relative_to(build_directory)): "", - str(self.configuration.functional_test_logs.relative_to(build_directory)): "functional", + str( + self.configuration.functional_test_logs.relative_to(build_directory) + ): "functional", str(self.preview_url.relative_to(build_directory)): "", } @@ -551,12 +598,11 @@ self.artifact_dir.mkdir(exist_ok=True) self.configuration.create_build_steps( - self.artifact_dir, self.preview_url, self.ip_address) + self.artifact_dir, self.preview_url, self.ip_address + ) return_code, message = asyncio.run( - self.wait_for_build( - self.configuration.get( - "timeout", DEFAULT_TIMEOUT)) + self.wait_for_build(self.configuration.get("timeout", DEFAULT_TIMEOUT)) ) return (return_code, message) @@ -577,6 +623,7 @@ # Only gather the public IP if we are running on a TC build agent from whatismyip import whatismyip + self.ip_address = whatismyip() def copy_artifacts(self, artifacts): @@ -598,8 +645,7 @@ # Let the user know what build is being run. # This makes it easier to retrieve the info from the logs. 
self.teamcity_messages.customMessage( - f"Starting build {self.configuration.name}", - status="NORMAL" + f"Starting build {self.configuration.name}", status="NORMAL" ) return_code, message = super().run() @@ -612,20 +658,20 @@ self.teamcity_messages.buildProblem( message, # Let Teamcity calculate an ID from our message - None + None, ) # Change the final build message self.teamcity_messages.buildStatus( # Don't change the status, let Teamcity set it to failure None, - message + message, ) else: # Change the final build message but keep the original one as well self.teamcity_messages.buildStatus( # Don't change the status, let Teamcity set it to success None, - f"{message} ({{build.status.text}})" + f"{message} ({{build.status.text}})", ) return (return_code, message) @@ -636,29 +682,23 @@ # By default search for a configuration file in the same directory as this # script. - default_config_path = Path( - script_dir.joinpath("build-configurations.yml") - ) + default_config_path = Path(script_dir.joinpath("build-configurations.yml")) parser = argparse.ArgumentParser(description="Run a CI build") - parser.add_argument( - "build", - help="The name of the build to run" - ) + parser.add_argument("build", help="The name of the build to run") parser.add_argument( "--config", "-c", help="Path to the builds configuration file (default to {})".format( str(default_config_path) - ) + ), ) args, unknown_args = parser.parse_known_args() # Check the configuration file exists config_path = Path(args.config) if args.config else default_config_path - build_configuration = BuildConfiguration( - script_dir, config_path, args.build) + build_configuration = BuildConfiguration(script_dir, config_path, args.build) if is_running_under_teamcity(): build = TeamcityBuild(build_configuration) @@ -668,5 +708,5 @@ sys.exit(build.run(unknown_args)[0]) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/contrib/teamcity/nanobench_json_to_teamcity_messages.py b/contrib/teamcity/nanobench_json_to_teamcity_messages.py --- a/contrib/teamcity/nanobench_json_to_teamcity_messages.py +++ b/contrib/teamcity/nanobench_json_to_teamcity_messages.py @@ -10,8 +10,7 @@ from teamcity.messages import TeamcityServiceMessages if len(sys.argv) != 3: - print( - f""" + print(f""" Usage: {sys.argv[0]} @@ -24,62 +23,56 @@ sys.exit(1) suite_name = sys.argv[1] -with open(sys.argv[2], encoding='utf-8') as f: +with open(sys.argv[2], encoding="utf-8") as f: json_results = json.load(f) teamcity_messages = TeamcityServiceMessages() -teamcity_messages.testSuiteStarted( - suite_name -) +teamcity_messages.testSuiteStarted(suite_name) def testMetadata_number_message(test_name, param_name, param_value): teamcity_messages.message( - 'testMetadata', - type='number', + "testMetadata", + type="number", testName=test_name, name=param_name, - value=f'{param_value:.2f}', + value=f"{param_value:.2f}", ) -for result in json_results.get('results', []): - test_name = result['name'] +for result in json_results.get("results", []): + test_name = result["name"] - teamcity_messages.testStarted( - test_name - ) + teamcity_messages.testStarted(test_name) testMetadata_number_message( test_name, f"ns/{result['unit']}", - 1e9 * result['median(elapsed)'] / result['batch'], + 1e9 * result["median(elapsed)"] / result["batch"], ) testMetadata_number_message( test_name, f"{result['unit']}/s", - result['batch'] / result['median(elapsed)'], + result["batch"] / result["median(elapsed)"], ) testMetadata_number_message( test_name, - 'err%', - 100 * 
result['medianAbsolutePercentError(elapsed)'], + "err%", + 100 * result["medianAbsolutePercentError(elapsed)"], ) testMetadata_number_message( test_name, f"ins/{result['unit']}", - result['median(instructions)'] / result['batch'], + result["median(instructions)"] / result["batch"], ) teamcity_messages.testFinished( test_name, - testDuration=timedelta(seconds=result['totalTime']), + testDuration=timedelta(seconds=result["totalTime"]), ) -teamcity_messages.testSuiteFinished( - suite_name -) +teamcity_messages.testSuiteFinished(suite_name) diff --git a/contrib/testgen/base58.py b/contrib/testgen/base58.py --- a/contrib/testgen/base58.py +++ b/contrib/testgen/base58.py @@ -2,11 +2,11 @@ # Copyright (c) 2012-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -''' +""" Bitcoin base58 encoding and decoding. Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain) -''' +""" import hashlib # for compatibility with following code... @@ -24,21 +24,21 @@ def chr(n): # noqa: A001 return bytes((n,)) -__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' + +__b58chars = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz" __b58base = len(__b58chars) b58chars = __b58chars def b58encode(v): - """ encode v, which is a string of bytes, to base58. - """ + """encode v, which is a string of bytes, to base58.""" long_value = 0 - for (i, c) in enumerate(v[::-1]): + for i, c in enumerate(v[::-1]): if isinstance(c, str): c = ord(c) long_value += (256**i) * c - result = '' + result = "" while long_value >= __b58base: div, mod = divmod(long_value, __b58base) result = __b58chars[mod] + result @@ -58,8 +58,7 @@ def b58decode(v, length=None): - """ decode v into a string of len bytes - """ + """decode v into a string of len bytes""" long_value = 0 for i, c in enumerate(v[::-1]): pos = __b58chars.find(c) @@ -109,7 +108,7 @@ def get_bcaddress_version(strAddress): - """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """ + """Returns None if strAddress is invalid. Otherwise returns integer version of address.""" addr = b58decode_chk(strAddress) if addr is None or len(addr) != 21: return None @@ -117,11 +116,11 @@ return ord(version) -if __name__ == '__main__': +if __name__ == "__main__": # Test case (from http://gitorious.org/bitcoin/python-base58.git) - assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0 - _ohai = 'o hai'.encode('ascii') + assert get_bcaddress_version("15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC") == 0 + _ohai = "o hai".encode("ascii") _tmp = b58encode(_ohai) - assert _tmp == 'DYB3oMS' + assert _tmp == "DYB3oMS" assert b58decode(_tmp, 5) == _ohai print("Tests passed") diff --git a/contrib/testgen/gen_base58_test_vectors.py b/contrib/testgen/gen_base58_test_vectors.py --- a/contrib/testgen/gen_base58_test_vectors.py +++ b/contrib/testgen/gen_base58_test_vectors.py @@ -2,13 +2,13 @@ # Copyright (c) 2012-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -''' +""" Generate valid and invalid base58 address and private key test vectors. Usage: gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json -''' +""" # 2012 Wladimir J. 
van der Laan # Released under MIT License import os @@ -26,24 +26,24 @@ PRIVKEY = 128 PRIVKEY_TEST = 239 -metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed'] +metadata_keys = ["isPrivkey", "isTestnet", "addrType", "isCompressed"] # templates for valid sequences templates = [ # prefix, payload_size, suffix, metadata # None = N/A - ((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)), - ((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)), - ((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)), - ((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)), + ((PUBKEY_ADDRESS,), 20, (), (False, False, "pubkey", None)), + ((SCRIPT_ADDRESS,), 20, (), (False, False, "script", None)), + ((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, "pubkey", None)), + ((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, "script", None)), ((PRIVKEY,), 32, (), (True, False, None, False)), ((PRIVKEY,), 32, (1,), (True, False, None, True)), ((PRIVKEY_TEST,), 32, (), (True, True, None, False)), - ((PRIVKEY_TEST,), 32, (1,), (True, True, None, True)) + ((PRIVKEY_TEST,), 32, (1,), (True, True, None, True)), ] def is_valid(v): - '''Check vector v for validity''' + """Check vector v for validity""" result = b58decode_chk(v) if result is None: return False @@ -57,7 +57,7 @@ def gen_valid_vectors(): - '''Generate valid test vectors''' + """Generate valid test vectors""" while True: for template in templates: prefix = bytearray(template[0]) @@ -65,17 +65,19 @@ suffix = bytearray(template[2]) rv = b58encode_chk(prefix + payload + suffix) assert is_valid(rv) - metadata = {x: y for x, y in zip( - metadata_keys, template[3]) if y is not None} + metadata = { + x: y for x, y in zip(metadata_keys, template[3]) if y is not None + } hexrepr = b2a_hex(payload) if isinstance(hexrepr, bytes): - hexrepr = hexrepr.decode('utf8') + hexrepr = hexrepr.decode("utf8") yield (rv, hexrepr, metadata) -def gen_invalid_vector(template, corrupt_prefix, - randomize_payload_size, corrupt_suffix): - '''Generate possibly invalid vector''' +def gen_invalid_vector( + template, corrupt_prefix, randomize_payload_size, corrupt_suffix +): + """Generate possibly invalid vector""" if corrupt_prefix: prefix = os.urandom(1) else: @@ -95,12 +97,12 @@ def randbool(p=0.5): - '''Return True with P(p)''' + """Return True with P(p)""" return random.random() < p def gen_invalid_vectors(): - '''Generate invalid test vectors''' + """Generate invalid test vectors""" # start with some manual edge-cases yield "", yield "x", @@ -111,22 +113,24 @@ # invalid (randomized) suffix (add random data) # corrupt checksum for template in templates: - val = gen_invalid_vector(template, randbool( - 0.2), randbool(0.2), randbool(0.2)) + val = gen_invalid_vector( + template, randbool(0.2), randbool(0.2), randbool(0.2) + ) if random.randint(0, 10) < 1: # line corruption if randbool(): # add random character to end val += random.choice(b58chars) else: # replace random character in the middle n = random.randint(0, len(val)) - val = val[0:n] + random.choice(b58chars) + val[n + 1:] + val = val[0:n] + random.choice(b58chars) + val[n + 1 :] if not is_valid(val): yield val, -if __name__ == '__main__': +if __name__ == "__main__": import json import sys - iters = {'valid': gen_valid_vectors, 'invalid': gen_invalid_vectors} + + iters = {"valid": gen_valid_vectors, "invalid": gen_invalid_vectors} try: uiter = iters[sys.argv[1]] except IndexError: @@ -138,4 +142,4 @@ data = list(islice(uiter(), count)) json.dump(data, sys.stdout, sort_keys=True, indent=4) - 
sys.stdout.write('\n') + sys.stdout.write("\n") diff --git a/contrib/tracing/log_raw_p2p_msgs.py b/contrib/tracing/log_raw_p2p_msgs.py --- a/contrib/tracing/log_raw_p2p_msgs.py +++ b/contrib/tracing/log_raw_p2p_msgs.py @@ -118,17 +118,20 @@ def print_message(event, inbound): print( "{} {} msg '{}' from peer {} ({}, {}) with {} bytes: {}".format( - "Warning: incomplete message (only {} out of {} bytes)!".format( - len(event.msg), - event.msg_size - ) if len(event.msg) < event.msg_size else "", + ( + "Warning: incomplete message (only {} out of {} bytes)!".format( + len(event.msg), event.msg_size + ) + if len(event.msg) < event.msg_size + else "" + ), "inbound" if inbound else "outbound", event.msg_type.decode("utf-8"), event.peer_id, event.peer_conn_type.decode("utf-8"), event.peer_addr.decode("utf-8"), event.msg_size, - bytes(event.msg[:event.msg_size]).hex(), + bytes(event.msg[: event.msg_size]).hex(), ) ) @@ -139,14 +142,16 @@ # attaching the trace functions defined in the BPF program to the # tracepoints bitcoind_with_usdts.enable_probe( - probe="inbound_message", fn_name="trace_inbound_message") + probe="inbound_message", fn_name="trace_inbound_message" + ) bitcoind_with_usdts.enable_probe( - probe="outbound_message", fn_name="trace_outbound_message") + probe="outbound_message", fn_name="trace_outbound_message" + ) bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) # BCC: perf buffer handle function for inbound_messages def handle_inbound(_, data, size): - """ Inbound message handler. + """Inbound message handler. Called each time a message is submitted to the inbound_messages BPF table.""" @@ -156,7 +161,7 @@ # BCC: perf buffer handle function for outbound_messages def handle_outbound(_, data, size): - """ Outbound message handler. + """Outbound message handler. Called each time a message is submitted to the outbound_messages BPF table.""" diff --git a/contrib/tracing/log_utxocache_flush.py b/contrib/tracing/log_utxocache_flush.py --- a/contrib/tracing/log_utxocache_flush.py +++ b/contrib/tracing/log_utxocache_flush.py @@ -39,12 +39,7 @@ } """ -FLUSH_MODES = [ - 'NONE', - 'IF_NEEDED', - 'PERIODIC', - 'ALWAYS' -] +FLUSH_MODES = ["NONE", "IF_NEEDED", "PERIODIC", "ALWAYS"] class Data(ctypes.Structure): @@ -54,18 +49,20 @@ ("mode", ctypes.c_uint32), ("coins_count", ctypes.c_uint64), ("coins_mem_usage", ctypes.c_uint64), - ("is_flush_for_prune", ctypes.c_bool) + ("is_flush_for_prune", ctypes.c_bool), ] def print_event(event): - print("{:15d} {:10s} {:15d} {:15s} {:8s}".format( - event.duration, - FLUSH_MODES[event.mode], - event.coins_count, - "{:.2f} kB".format(event.coins_mem_usage / 1000), - str(event.is_flush_for_prune), - )) + print( + "{:15d} {:10s} {:15d} {:15s} {:8s}".format( + event.duration, + FLUSH_MODES[event.mode], + event.coins_count, + "{:.2f} kB".format(event.coins_mem_usage / 1000), + str(event.is_flush_for_prune), + ) + ) def main(bitcoind_path): @@ -73,13 +70,12 @@ # attaching the trace functions defined in the BPF program # to the tracepoints - bitcoind_with_usdts.enable_probe( - probe="flush", fn_name="trace_flush") + bitcoind_with_usdts.enable_probe(probe="flush", fn_name="trace_flush") b = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) def handle_flush(_, data, size): - """ Coins Flush handler. - Called each time coin caches and indexes are flushed.""" + """Coins Flush handler. 
+ Called each time coin caches and indexes are flushed.""" event = ctypes.cast(data, ctypes.POINTER(Data)).contents print_event(event) @@ -87,11 +83,9 @@ print("Logging utxocache flushes. Ctrl-C to end...") print( "{:15s} {:10s} {:15s} {:15s} {:8s}".format( - "Duration (µs)", - "Mode", - "Coins Count", - "Memory Usage", - "Flush for Prune")) + "Duration (µs)", "Mode", "Coins Count", "Memory Usage", "Flush for Prune" + ) + ) while True: try: diff --git a/contrib/tracing/p2p_monitor.py b/contrib/tracing/p2p_monitor.py --- a/contrib/tracing/p2p_monitor.py +++ b/contrib/tracing/p2p_monitor.py @@ -72,7 +72,8 @@ class Message: - """ A P2P network message. """ + """A P2P network message.""" + msg_type = "" size = 0 data = bytes() @@ -85,7 +86,8 @@ class Peer: - """ A P2P network peer. """ + """A P2P network peer.""" + id = 0 address = "" connection_type = "" @@ -122,36 +124,46 @@ # attaching the trace functions defined in the BPF program to the # tracepoints bitcoind_with_usdts.enable_probe( - probe="inbound_message", fn_name="trace_inbound_message") + probe="inbound_message", fn_name="trace_inbound_message" + ) bitcoind_with_usdts.enable_probe( - probe="outbound_message", fn_name="trace_outbound_message") + probe="outbound_message", fn_name="trace_outbound_message" + ) bpf = BPF(text=program, usdt_contexts=[bitcoind_with_usdts]) # BCC: perf buffer handle function for inbound_messages def handle_inbound(_, data, size): - """ Inbound message handler. + """Inbound message handler. Called each time a message is submitted to the inbound_messages BPF table.""" event = bpf["inbound_messages"].event(data) if event.peer_id not in peers: - peer = Peer(event.peer_id, event.peer_addr.decode( - "utf-8"), event.peer_conn_type.decode("utf-8")) + peer = Peer( + event.peer_id, + event.peer_addr.decode("utf-8"), + event.peer_conn_type.decode("utf-8"), + ) peers[peer.id] = peer peers[event.peer_id].add_message( - Message(event.msg_type.decode("utf-8"), event.msg_size, True)) + Message(event.msg_type.decode("utf-8"), event.msg_size, True) + ) # BCC: perf buffer handle function for outbound_messages def handle_outbound(_, data, size): - """ Outbound message handler. + """Outbound message handler. 
Called each time a message is submitted to the outbound_messages BPF table.""" event = bpf["outbound_messages"].event(data) if event.peer_id not in peers: - peer = Peer(event.peer_id, event.peer_addr.decode( - "utf-8"), event.peer_conn_type.decode("utf-8")) + peer = Peer( + event.peer_id, + event.peer_addr.decode("utf-8"), + event.peer_conn_type.decode("utf-8"), + ) peers[peer.id] = peer peers[event.peer_id].add_message( - Message(event.msg_type.decode("utf-8"), event.msg_size, False)) + Message(event.msg_type.decode("utf-8"), event.msg_size, False) + ) # BCC: add handlers to the inbound and outbound perf buffers bpf["inbound_messages"].open_perf_buffer(handle_inbound) @@ -165,8 +177,9 @@ cur_list_pos = 0 win = curses.newwin(30, 70, 2, 7) win.erase() - win.border(ord("|"), ord("|"), ord("-"), ord("-"), - ord("-"), ord("-"), ord("-"), ord("-")) + win.border( + ord("|"), ord("|"), ord("-"), ord("-"), ord("-"), ord("-"), ord("-"), ord("-") + ) info_panel = panel.new_panel(win) info_panel.hide() @@ -179,76 +192,117 @@ bpf.perf_buffer_poll(timeout=50) ch = screen.getch() - if (ch == curses.KEY_DOWN or ch == ord("j")) and cur_list_pos < len( - peers.keys()) - 1 and info_panel.hidden(): + if ( + (ch == curses.KEY_DOWN or ch == ord("j")) + and cur_list_pos < len(peers.keys()) - 1 + and info_panel.hidden() + ): cur_list_pos += 1 if cur_list_pos >= ROWS_AVALIABLE_FOR_LIST: scroll += 1 - if ((ch == curses.KEY_UP or ch == ord("k")) - and cur_list_pos > 0 and info_panel.hidden()): + if ( + (ch == curses.KEY_UP or ch == ord("k")) + and cur_list_pos > 0 + and info_panel.hidden() + ): cur_list_pos -= 1 if scroll > 0: scroll -= 1 - if ch == ord('\n') or ch == ord(' '): + if ch == ord("\n") or ch == ord(" "): if info_panel.hidden(): info_panel.show() else: info_panel.hide() screen.erase() render( - screen, - peers, - cur_list_pos, - scroll, - ROWS_AVALIABLE_FOR_LIST, - info_panel) + screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel + ) curses.panel.update_panels() screen.refresh() except KeyboardInterrupt: exit() -def render(screen, peers, cur_list_pos, scroll, - ROWS_AVALIABLE_FOR_LIST, info_panel): - """ renders the list of peers and details panel +def render(screen, peers, cur_list_pos, scroll, ROWS_AVALIABLE_FOR_LIST, info_panel): + """renders the list of peers and details panel This code is unrelated to USDT, BCC and BPF. 
""" header_format = "%6s %-20s %-20s %-22s %-67s" row_format = "%6s %-5d %9d byte %-5d %9d byte %-22s %-67s" - screen.addstr(0, 1, (" P2P Message Monitor "), curses.A_REVERSE) + screen.addstr(0, 1, " P2P Message Monitor ", curses.A_REVERSE) + screen.addstr( + 1, + 0, + ( + " Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see" + " individual P2P messages" + ), + curses.A_NORMAL, + ) screen.addstr( - 1, 0, (" Navigate with UP/DOWN or J/K and select a peer with ENTER or SPACE to see individual P2P messages"), curses.A_NORMAL) - screen.addstr(3, 0, - header_format % ("PEER", "OUTBOUND", "INBOUND", "TYPE", "ADDR"), curses.A_BOLD | curses.A_UNDERLINE) - peer_list = sorted(peers.keys())[scroll:ROWS_AVALIABLE_FOR_LIST + scroll] + 3, + 0, + header_format % ("PEER", "OUTBOUND", "INBOUND", "TYPE", "ADDR"), + curses.A_BOLD | curses.A_UNDERLINE, + ) + peer_list = sorted(peers.keys())[scroll : ROWS_AVALIABLE_FOR_LIST + scroll] for i, peer_id in enumerate(peer_list): peer = peers[peer_id] - screen.addstr(i + 4, 0, - row_format % (peer.id, peer.total_outbound_msgs, peer.total_outbound_bytes, - peer.total_inbound_msgs, peer.total_inbound_bytes, - peer.connection_type, peer.address), - curses.A_REVERSE if i + scroll == cur_list_pos else curses.A_NORMAL) + screen.addstr( + i + 4, + 0, + row_format + % ( + peer.id, + peer.total_outbound_msgs, + peer.total_outbound_bytes, + peer.total_inbound_msgs, + peer.total_inbound_bytes, + peer.connection_type, + peer.address, + ), + curses.A_REVERSE if i + scroll == cur_list_pos else curses.A_NORMAL, + ) if i + scroll == cur_list_pos: info_window = info_panel.window() info_window.erase() info_window.border( - ord("|"), ord("|"), ord("-"), ord("-"), - ord("-"), ord("-"), ord("-"), ord("-")) + ord("|"), + ord("|"), + ord("-"), + ord("-"), + ord("-"), + ord("-"), + ord("-"), + ord("-"), + ) info_window.addstr( - 1, 1, f"PEER {peer.id} ({peer.address})".center(68), curses.A_REVERSE | curses.A_BOLD) + 1, + 1, + f"PEER {peer.id} ({peer.address})".center(68), + curses.A_REVERSE | curses.A_BOLD, + ) info_window.addstr( - 2, 1, f" OUR NODE{peer.connection_type:^54}PEER ", - curses.A_BOLD) + 2, 1, f" OUR NODE{peer.connection_type:^54}PEER ", curses.A_BOLD + ) for i, msg in enumerate(peer.last_messages): if msg.inbound: info_window.addstr( - i + 3, 1, f"{f'<--- {msg.msg_type} ({msg.size} bytes) ':68s}", curses.A_NORMAL) + i + 3, + 1, + f"{f'<--- {msg.msg_type} ({msg.size} bytes) ':68s}", + curses.A_NORMAL, + ) else: info_window.addstr( - i + 3, 1, f" {msg.msg_type} ({msg.size} byte) --->", curses.A_NORMAL) + i + 3, + 1, + f" {msg.msg_type} ({msg.size} byte) --->", + curses.A_NORMAL, + ) if __name__ == "__main__": diff --git a/contrib/zmq/zmq_sub.py b/contrib/zmq/zmq_sub.py --- a/contrib/zmq/zmq_sub.py +++ b/contrib/zmq/zmq_sub.py @@ -41,7 +41,7 @@ ip = "127.0.0.1" -class ZMQHandler(): +class ZMQHandler: def __init__(self): self.loop = asyncio.get_event_loop() self.zmqContext = zmq.asyncio.Context() @@ -59,25 +59,28 @@ topic, body, seq = await self.zmqSubSocket.recv_multipart() sequence = "Unknown" if len(seq) == 4: - sequence = str(struct.unpack('