Changeset View
Changeset View
Standalone View
Standalone View
test/functional/test_runner.py
Show First 20 Lines • Show All 268 Lines • ▼ Show 20 Lines | else: | ||||
cutoff = sys.maxsize | cutoff = sys.maxsize | ||||
# Remove the test cases that the user has explicitly asked to exclude. | # Remove the test cases that the user has explicitly asked to exclude. | ||||
if args.exclude: | if args.exclude: | ||||
for exclude_test in args.exclude.split(','): | for exclude_test in args.exclude.split(','): | ||||
if exclude_test + ".py" in test_list: | if exclude_test + ".py" in test_list: | ||||
test_list.remove(exclude_test + ".py") | test_list.remove(exclude_test + ".py") | ||||
# Use and update timings from build_dir only if separate | |||||
# build directory is used. We do not want to pollute source directory. | |||||
build_timings = None | |||||
if (src_dir != build_dir): | |||||
build_timings = Timings(os.path.join(build_dir, 'timing.json')) | |||||
# Always use timings from src_dir if present | |||||
src_timings = Timings(os.path.join( | |||||
src_dir, "test", "functional", 'timing.json')) | |||||
# Add test parameters and remove long running tests if needed | # Add test parameters and remove long running tests if needed | ||||
test_list = get_tests_to_run( | test_list = get_tests_to_run(test_list, TEST_PARAMS) | ||||
test_list, TEST_PARAMS, cutoff, src_timings, build_timings) | |||||
if not test_list: | if not test_list: | ||||
print("No valid test scripts specified. Check that your test is in one " | print("No valid test scripts specified. Check that your test is in one " | ||||
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") | "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") | ||||
sys.exit(0) | sys.exit(0) | ||||
if args.help: | if args.help: | ||||
# Print help for test_runner.py, then print help of the first script | # Print help for test_runner.py, then print help of the first script | ||||
# and exit. | # and exit. | ||||
parser.print_help() | parser.print_help() | ||||
subprocess.check_call( | subprocess.check_call( | ||||
[os.path.join(tests_dir, test_list[0]), '-h']) | [os.path.join(tests_dir, test_list[0]), '-h']) | ||||
sys.exit(0) | sys.exit(0) | ||||
if not args.keepcache: | if not args.keepcache: | ||||
shutil.rmtree(os.path.join(build_dir, "test", | shutil.rmtree(os.path.join(build_dir, "test", | ||||
"cache"), ignore_errors=True) | "cache"), ignore_errors=True) | ||||
run_tests(test_list, build_dir, tests_dir, args.junitouput, | run_tests(test_list, build_dir, tests_dir, args.junitouput, | ||||
config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, build_timings, args.extended) | config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.extended) | ||||
def run_tests(test_list, build_dir, tests_dir, junitouput, exeext, tmpdir, num_jobs, enable_coverage=False, args=[], build_timings=None, run_extended=None): | def run_tests(test_list, build_dir, tests_dir, junitouput, exeext, tmpdir, num_jobs, enable_coverage=False, args=[], run_extended=False): | ||||
# Warn if bitcoind is already running (unix only) | # Warn if bitcoind is already running (unix only) | ||||
try: | try: | ||||
pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | ||||
if pidofOutput is not None and pidofOutput != b'': | if pidofOutput is not None and pidofOutput != b'': | ||||
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % ( | print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % ( | ||||
BOLD[1], BOLD[0])) | BOLD[1], BOLD[0])) | ||||
except (OSError, subprocess.SubprocessError): | except (OSError, subprocess.SubprocessError): | ||||
pass | pass | ||||
Show All 31 Lines | def run_tests(test_list, build_dir, tests_dir, junitouput, exeext, tmpdir, num_jobs, enable_coverage=False, args=[], run_extended=False): | ||||
test_results = execute_test_processes( | test_results = execute_test_processes( | ||||
num_jobs, test_list, tests_dir, tmpdir, flags, run_extended) | num_jobs, test_list, tests_dir, tmpdir, flags, run_extended) | ||||
runtime = int(time.time() - time0) | runtime = int(time.time() - time0) | ||||
max_len_name = len(max(test_list, key=len)) | max_len_name = len(max(test_list, key=len)) | ||||
print_results(test_results, max_len_name, runtime) | print_results(test_results, max_len_name, runtime) | ||||
save_results_as_junit(test_results, junitouput, runtime) | save_results_as_junit(test_results, junitouput, runtime) | ||||
if (build_timings is not None): | |||||
build_timings.save_timings(test_results) | |||||
if coverage: | if coverage: | ||||
coverage.report_rpc_coverage() | coverage.report_rpc_coverage() | ||||
logging.debug("Cleaning up coverage data") | logging.debug("Cleaning up coverage data") | ||||
coverage.cleanup() | coverage.cleanup() | ||||
# Clear up the temp directory if all subdirectories are gone | # Clear up the temp directory if all subdirectories are gone | ||||
if not os.listdir(tmpdir): | if not os.listdir(tmpdir): | ||||
▲ Show 20 Lines • Show All 187 Lines • ▼ Show 20 Lines | |||||
def get_all_scripts_from_disk(test_dir, non_scripts):
    """
    List every ``*.py`` file found directly in test_dir, excluding the
    file names given in non_scripts (helper/config files that are not tests).
    """
    excluded = set(non_scripts)
    scripts = {entry for entry in os.listdir(test_dir) if entry.endswith(".py")}
    return list(scripts - excluded)
def get_tests_to_run(test_list, test_params, cutoff, src_timings, build_timings=None):
    """
    Return the tests (with parameter variants) expected to finish within cutoff.

    Every test in test_list is always run once without parameters; entries in
    test_params add extra invocations with the flags appended to the name.
    Tests whose recorded runtime exceeds cutoff are dropped.  The result is
    sorted longest-running first (name as tie-breaker) so long tests start
    early and parallel scheduling stays efficient.  Timings from the build
    directory override those from the source directory; tests with no recorded
    timing are treated as taking 0 seconds so they are always run.
    """
    # Build the name -> runtime map once instead of scanning the timing lists
    # for every lookup: the old per-call linear scan ran twice per test during
    # sorting, which was O(n^2) in the number of tests.
    known_times = {x['name']: x['time'] for x in src_timings.existing_timings}
    if build_timings is not None:
        # Build-directory timings take precedence over source-directory ones.
        known_times.update(
            {x['name']: x['time'] for x in build_timings.existing_timings})

    def get_test_time(test):
        # Unknown tests get 0 so the cutoff never excludes them.
        return known_times.get(test, 0)

    # Some tests must also be run with additional parameters. Add them to the
    # list; the parameterless invocation is always kept.
    tests_with_params = []
    for test_name in test_list:
        # always execute a test without parameters
        tests_with_params.append(test_name)
        params = test_params.get(test_name)
        if params is not None:
            tests_with_params.extend(
                [test_name + " " + " ".join(p) for p in params])

    result = [t for t in tests_with_params if get_test_time(t) <= cutoff]
    # Longest tests first; sort by name within equal times for determinism.
    result.sort(key=lambda x: (-get_test_time(x), x))
    return result
class RPCCoverage(): | class RPCCoverage(): | ||||
""" | """ | ||||
Coverage reporting utilities for test_runner. | Coverage reporting utilities for test_runner. | ||||
Coverage calculation works by having each test script subprocess write | Coverage calculation works by having each test script subprocess write | ||||
coverage files into a particular directory. These files contain the RPC | coverage files into a particular directory. These files contain the RPC | ||||
▲ Show 20 Lines • Show All 91 Lines • ▼ Show 20 Lines | for test_result in test_results: | ||||
ET.SubElement(e_test_case, "system-out").text = test_result.stdout | ET.SubElement(e_test_case, "system-out").text = test_result.stdout | ||||
ET.SubElement(e_test_case, "system-err").text = test_result.stderr | ET.SubElement(e_test_case, "system-err").text = test_result.stderr | ||||
ET.ElementTree(e_test_suite).write( | ET.ElementTree(e_test_suite).write( | ||||
file_name, "UTF-8", xml_declaration=True) | file_name, "UTF-8", xml_declaration=True) | ||||
class Timings():
    """
    Loads, merges and persists per-test execution times (timing.json).
    """

    def __init__(self, timing_file):
        self.timing_file = timing_file
        self.existing_timings = self.load_timings()

    def load_timings(self):
        # A missing file simply means no timings have been recorded yet.
        if not os.path.isfile(self.timing_file):
            return []
        with open(self.timing_file) as f:
            return json.load(f)

    def get_merged_timings(self, new_timings):
        """
        Merge new_timings into the existing timings and return the result.

        Entries present in both lists are updated with the new values; entries
        that only exist in the old data are kept.  The returned list is sorted
        by test name to keep the file ordering stable.
        """
        by_name = {}
        for entry in self.existing_timings + new_timings:
            name = entry['name']
            if name in by_name:
                by_name[name].update(entry)
            else:
                by_name[name] = entry
        return sorted(by_name.values(), key=lambda entry: entry['name'])

    def save_timings(self, test_results):
        # Timings of failed tests may reflect timeouts or early aborts, so
        # only results that passed are recorded.
        passed = [r for r in test_results if r.status == 'Passed']
        new_timings = [{'name': r.name, 'time': r.time} for r in passed]
        with open(self.timing_file, 'w') as f:
            json.dump(self.get_merged_timings(new_timings), f, indent=True)
if __name__ == '__main__': | if __name__ == '__main__': | ||||
main() | main() |