Changeset View
Changeset View
Standalone View
Standalone View
test/functional/test_runner.py
Show All 10 Lines | |||||
Functional tests are disabled on Windows by default. Use --force to run them anyway.

For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
▲ Show 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | class TestCase(): | ||||
def run(self, portseed_offset):
    """Run this test script in a subprocess and return a TestResult.

    Each test gets a unique --portseed (its test number plus
    portseed_offset) so concurrently running tests do not collide on
    TCP ports, and a unique --tmpdir derived from the script name and
    portseed so their data directories do not overlap.
    """
    t = self.test_case
    portseed = self.test_num + portseed_offset
    portseed_arg = ["--portseed={}".format(portseed)]
    # SpooledTemporaryFile keeps small logs in memory and only spills
    # to disk once they exceed 64 KiB.
    log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
    log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
    test_argv = t.split()
    testdir = os.path.join("{}", "{}_{}").format(
        self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
    tmpdir_arg = ["--tmpdir={}".format(testdir)]
    name = t
    time0 = time.time()
    process = subprocess.Popen([os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                               universal_newlines=True,
                               stdout=log_stdout,
                               stderr=log_stderr)
    process.wait()
    log_stdout.seek(0), log_stderr.seek(0)
    # The SpooledTemporaryFiles are opened in binary mode, so the
    # captured output must be decoded explicitly.
    [stdout, stderr] = [l.read().decode('utf-8')
                        for l in (log_stdout, log_stderr)]
    log_stdout.close(), log_stderr.close()
    # Any output on stderr fails the test even if the exit code passed.
    if process.returncode == TEST_EXIT_PASSED and stderr == "":
        status = "Passed"
    elif process.returncode == TEST_EXIT_SKIPPED:
        status = "Skipped"
    else:
        status = "Failed"
    # testdir is carried in the result so failing logs can be combined
    # and printed later.
    return TestResult(self.test_num, name, testdir, status,
                      int(time.time() - time0), stdout, stderr)
def on_ci():
    """Return True when running under a known CI environment.

    Detects Travis (TRAVIS=true) or TeamCity (TEAMCITY_VERSION set).
    """
    # PEP 8: comparisons to singletons such as None must use identity
    # tests ("is not None"), not "!= None".
    return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') is not None
def main(): | def main(): | ||||
# Read config generated by configure. | # Read config generated by configure. | ||||
config = configparser.ConfigParser() | config = configparser.ConfigParser() | ||||
configfile = os.path.join(os.path.abspath( | configfile = os.path.join(os.path.abspath( | ||||
os.path.dirname(__file__)), "..", "config.ini") | os.path.dirname(__file__)), "..", "config.ini") | ||||
config.read_file(open(configfile)) | config.read_file(open(configfile)) | ||||
src_dir = config["environment"]["SRCDIR"] | src_dir = config["environment"]["SRCDIR"] | ||||
build_dir = config["environment"]["BUILDDIR"] | build_dir = config["environment"]["BUILDDIR"] | ||||
tests_dir = os.path.join(src_dir, 'test', 'functional') | tests_dir = os.path.join(src_dir, 'test', 'functional') | ||||
# Parse arguments and pass through unrecognised args | # Parse arguments and pass through unrecognised args | ||||
parser = argparse.ArgumentParser(add_help=False, | parser = argparse.ArgumentParser(add_help=False, | ||||
usage='%(prog)s [test_runner.py options] [script options] [scripts]', | usage='%(prog)s [test_runner.py options] [script options] [scripts]', | ||||
description=__doc__, | description=__doc__, | ||||
epilog=''' | epilog=''' | ||||
Help text and arguments for individual test script:''', | Help text and arguments for individual test script:''', | ||||
formatter_class=argparse.RawTextHelpFormatter) | formatter_class=argparse.RawTextHelpFormatter) | ||||
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, | |||||
help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.') | |||||
parser.add_argument('--coverage', action='store_true', | parser.add_argument('--coverage', action='store_true', | ||||
help='generate a basic coverage report for the RPC interface') | help='generate a basic coverage report for the RPC interface') | ||||
parser.add_argument( | parser.add_argument( | ||||
'--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.') | '--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.') | ||||
parser.add_argument('--extended', action='store_true', | parser.add_argument('--extended', action='store_true', | ||||
help='run the extended test suite in addition to the basic tests') | help='run the extended test suite in addition to the basic tests') | ||||
parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF, | parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF, | ||||
help='set the cutoff runtime for what tests get run') | help='set the cutoff runtime for what tests get run') | ||||
▲ Show 20 Lines • Show All 128 Lines • ▼ Show 20 Lines | if args.help: | ||||
[os.path.join(tests_dir, test_list[0]), '-h']) | [os.path.join(tests_dir, test_list[0]), '-h']) | ||||
sys.exit(0) | sys.exit(0) | ||||
if not args.keepcache: | if not args.keepcache: | ||||
shutil.rmtree(os.path.join(build_dir, "test", | shutil.rmtree(os.path.join(build_dir, "test", | ||||
"cache"), ignore_errors=True) | "cache"), ignore_errors=True) | ||||
run_tests(test_list, build_dir, tests_dir, args.junitouput, | run_tests(test_list, build_dir, tests_dir, args.junitouput, | ||||
tmpdir, args.jobs, args.coverage, passon_args, build_timings) | tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen, build_timings) | ||||
def run_tests(test_list, build_dir, tests_dir, junitouput, tmpdir, num_jobs, enable_coverage=False, args=[], build_timings=None): | def run_tests(test_list, build_dir, tests_dir, junitouput, tmpdir, num_jobs, enable_coverage=False, args=[], combined_logs_len=0, build_timings=None): | ||||
# Warn if bitcoind is already running (unix only) | # Warn if bitcoind is already running (unix only) | ||||
try: | try: | ||||
pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | ||||
if pidofOutput is not None and pidofOutput != b'': | if pidofOutput is not None and pidofOutput != b'': | ||||
print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format( | print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format( | ||||
BOLD[1], BOLD[0])) | BOLD[1], BOLD[0])) | ||||
except (OSError, subprocess.SubprocessError): | except (OSError, subprocess.SubprocessError): | ||||
pass | pass | ||||
Show All 26 Lines | def run_tests(test_list, build_dir, tests_dir, junitouput, tmpdir, num_jobs, enable_coverage=False, args=[], combined_logs_len=0, build_timings=None): | ||||
# Run Tests | # Run Tests | ||||
time0 = time.time() | time0 = time.time() | ||||
test_results = execute_test_processes( | test_results = execute_test_processes( | ||||
num_jobs, test_list, tests_dir, tmpdir, flags) | num_jobs, test_list, tests_dir, tmpdir, flags) | ||||
runtime = int(time.time() - time0) | runtime = int(time.time() - time0) | ||||
max_len_name = len(max(test_list, key=len)) | max_len_name = len(max(test_list, key=len)) | ||||
print_results(test_results, max_len_name, runtime) | print_results(test_results, tests_dir, max_len_name, | ||||
runtime, combined_logs_len) | |||||
save_results_as_junit(test_results, junitouput, runtime) | save_results_as_junit(test_results, junitouput, runtime) | ||||
if (build_timings is not None): | if (build_timings is not None): | ||||
build_timings.save_timings(test_results) | build_timings.save_timings(test_results) | ||||
if coverage: | if coverage: | ||||
coverage.report_rpc_coverage() | coverage.report_rpc_coverage() | ||||
▲ Show 20 Lines • Show All 130 Lines • ▼ Show 20 Lines | def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags): | ||||
# Flush our queues so the threads exit | # Flush our queues so the threads exit | ||||
update_queue.put(None) | update_queue.put(None) | ||||
for j in range(num_jobs): | for j in range(num_jobs): | ||||
job_queue.put(None) | job_queue.put(None) | ||||
return test_results | return test_results | ||||
def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len):
    """Print a formatted summary table of all test results.

    test_results is sorted in place by test name. When combined_logs_len
    is non-zero, the last combined_logs_len lines of the combined
    node/framework logs are also printed for each test whose --tmpdir
    still exists (combine_logs.py is invoked per test directory).
    """
    results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
        "TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
    test_results.sort(key=lambda result: result.name.lower())
    all_passed = True
    time_sum = 0
    for test_result in test_results:
        all_passed = all_passed and test_result.was_successful
        time_sum += test_result.time
        # padding aligns the per-test row with the table header.
        test_result.padding = max_len_name
        results += str(test_result)
        testdir = test_result.testdir
        if combined_logs_len and os.path.isdir(testdir):
            # Print the final `combined_logs_len` lines of the combined logs.
            print('{}Combine the logs and print the last {} lines ...{}'.format(
                BOLD[1], combined_logs_len, BOLD[0]))
            print('\n============')
            print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
            print('============\n')
            combined_logs, _ = subprocess.Popen([os.path.join(
                tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
            # A bounded deque keeps only the trailing combined_logs_len lines.
            print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
    status = TICK + "Passed" if all_passed else CROSS + "Failed"
    results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format(
        "ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
    results += "Runtime: {} s\n".format(runtime)
    print(results)
class TestResult(): | class TestResult(): | ||||
""" | """ | ||||
Simple data structure to store test result values and print them properly | Simple data structure to store test result values and print them properly | ||||
""" | """ | ||||
def __init__(self, num, name, testdir, status, time, stdout, stderr):
    """Store the outcome of a single test run.

    num: index of the test in the run order
    name: test script name (including any script arguments)
    testdir: the --tmpdir the test ran with, kept so the combined
        logs can be printed for this test later
    status: "Passed", "Skipped" or "Failed"
    time: wall-clock runtime in whole seconds
    stdout, stderr: captured output of the test subprocess
    """
    self.num = num
    self.name = name
    self.testdir = testdir
    self.status = status
    self.time = time
    # padding is assigned later by print_results to align the table.
    self.padding = 0
    self.stdout = stdout
    self.stderr = stderr
def __repr__(self): | def __repr__(self): | ||||
if self.status == "Passed": | if self.status == "Passed": | ||||
▲ Show 20 Lines • Show All 212 Lines • Show Last 20 Lines |