Changeset View
Changeset View
Standalone View
Standalone View
test/functional/test_runner.py
Show All 22 Lines | |||||
import shutil | import shutil | ||||
import signal | import signal | ||||
import sys | import sys | ||||
import subprocess | import subprocess | ||||
import tempfile | import tempfile | ||||
import re | import re | ||||
import logging | import logging | ||||
import xml.etree.ElementTree as ET | import xml.etree.ElementTree as ET | ||||
import json | |||||
# Formatting. Default colors to empty strings. | # Formatting. Default colors to empty strings. | ||||
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") | BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") | ||||
try: | try: | ||||
# Make sure python thinks it can write unicode to its stdout | # Make sure python thinks it can write unicode to its stdout | ||||
"\u2713".encode("utf_8").decode(sys.stdout.encoding) | "\u2713".encode("utf_8").decode(sys.stdout.encoding) | ||||
TICK = "✓ " | TICK = "✓ " | ||||
CROSS = "✖ " | CROSS = "✖ " | ||||
▲ Show 20 Lines • Show All 224 Lines • ▼ Show 20 Lines | else: | ||||
# longer sorted. | # longer sorted. | ||||
# Remove the test cases that the user has explicitly asked to exclude. | # Remove the test cases that the user has explicitly asked to exclude. | ||||
if args.exclude: | if args.exclude: | ||||
for exclude_test in args.exclude.split(','): | for exclude_test in args.exclude.split(','): | ||||
if exclude_test + ".py" in test_list: | if exclude_test + ".py" in test_list: | ||||
test_list.remove(exclude_test + ".py") | test_list.remove(exclude_test + ".py") | ||||
# Use and update timings from build_dir only if separate | |||||
# build directory is used. We do not want to pollute source directory. | |||||
build_timings = None | |||||
if (src_dir != build_dir): | |||||
build_timings = Timings(build_dir) | |||||
if not test_list: | if not test_list: | ||||
print("No valid test scripts specified. Check that your test is in one " | print("No valid test scripts specified. Check that your test is in one " | ||||
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") | "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") | ||||
sys.exit(0) | sys.exit(0) | ||||
if args.help: | if args.help: | ||||
# Print help for test_runner.py, then print help of the first script | # Print help for test_runner.py, then print help of the first script | ||||
# and exit. | # and exit. | ||||
parser.print_help() | parser.print_help() | ||||
subprocess.check_call( | subprocess.check_call( | ||||
[os.path.join(tests_dir, test_list[0]), '-h']) | [os.path.join(tests_dir, test_list[0]), '-h']) | ||||
sys.exit(0) | sys.exit(0) | ||||
check_script_list(src_dir) | check_script_list(src_dir) | ||||
if not args.keepcache: | if not args.keepcache: | ||||
shutil.rmtree(os.path.join(build_dir, "test", | shutil.rmtree(os.path.join(build_dir, "test", | ||||
"cache"), ignore_errors=True) | "cache"), ignore_errors=True) | ||||
run_tests(test_list, src_dir, build_dir, tests_dir, args.junitouput, | run_tests(test_list, build_dir, tests_dir, args.junitouput, | ||||
config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args) | config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, build_timings) | ||||
def run_tests(test_list, src_dir, build_dir, tests_dir, junitouput, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]): | def run_tests(test_list, build_dir, tests_dir, junitouput, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], build_timings=None): | ||||
# Warn if bitcoind is already running (unix only) | # Warn if bitcoind is already running (unix only) | ||||
try: | try: | ||||
pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | ||||
if pidofOutput is not None and pidofOutput != b'': | if pidofOutput is not None and pidofOutput != b'': | ||||
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % ( | print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % ( | ||||
BOLD[1], BOLD[0])) | BOLD[1], BOLD[0])) | ||||
except (OSError, subprocess.SubprocessError): | except (OSError, subprocess.SubprocessError): | ||||
pass | pass | ||||
▲ Show 20 Lines • Show All 48 Lines • ▼ Show 20 Lines | for _ in range(len(test_list)): | ||||
(BOLD[1], test_result.name, BOLD[0], test_result.time)) | (BOLD[1], test_result.name, BOLD[0], test_result.time)) | ||||
print(BOLD[1] + 'stdout:\n' + BOLD[0] + test_result.stdout + '\n') | print(BOLD[1] + 'stdout:\n' + BOLD[0] + test_result.stdout + '\n') | ||||
print(BOLD[1] + 'stderr:\n' + BOLD[0] + test_result.stderr + '\n') | print(BOLD[1] + 'stderr:\n' + BOLD[0] + test_result.stderr + '\n') | ||||
runtime = int(time.time() - time0) | runtime = int(time.time() - time0) | ||||
print_results(test_results, max_len_name, runtime) | print_results(test_results, max_len_name, runtime) | ||||
save_results_as_junit(test_results, junitouput, runtime) | save_results_as_junit(test_results, junitouput, runtime) | ||||
if (build_timings is not None): | |||||
build_timings.save_timings(test_results) | |||||
if coverage: | if coverage: | ||||
coverage.report_rpc_coverage() | coverage.report_rpc_coverage() | ||||
logging.debug("Cleaning up coverage data") | logging.debug("Cleaning up coverage data") | ||||
coverage.cleanup() | coverage.cleanup() | ||||
# Clear up the temp directory if all subdirectories are gone | # Clear up the temp directory if all subdirectories are gone | ||||
if not os.listdir(tmpdir): | if not os.listdir(tmpdir): | ||||
▲ Show 20 Lines • Show All 235 Lines • ▼ Show 20 Lines | for test_result in test_results: | ||||
ET.SubElement(e_test_case, "system-out").text = test_result.stdout | ET.SubElement(e_test_case, "system-out").text = test_result.stdout | ||||
ET.SubElement(e_test_case, "system-err").text = test_result.stderr | ET.SubElement(e_test_case, "system-err").text = test_result.stderr | ||||
ET.ElementTree(e_test_suite).write( | ET.ElementTree(e_test_suite).write( | ||||
file_name, "UTF-8", xml_declaration=True) | file_name, "UTF-8", xml_declaration=True) | ||||
class Timings():
    """
    Takes care of loading, merging and saving tests execution times.

    Timings are persisted as a JSON list of {'name': ..., 'time': ...}
    objects in <dir>/timing.json.
    """

    def __init__(self, dir):
        # NOTE: 'dir' shadows the builtin, but the parameter name is kept
        # for backward compatibility with existing callers.
        self.dir = dir
        self.timing_file = os.path.join(dir, 'timing.json')
        self.existing_timings = self.load_timings()

    def load_timings(self):
        """Return the timings stored in timing_file, or [] if it does not exist."""
        if os.path.isfile(self.timing_file):
            with open(self.timing_file) as f:
                return json.load(f)
        return []

    def get_merged_timings(self, new_timings):
        """
        Return a new list containing existing timings updated with new timings.

        Tests that do not exist in new_timings are not removed.
        """
        merged = {}
        for item in self.existing_timings + new_timings:
            name = item['name']
            if name in merged:
                merged[name].update(item)
            else:
                merged[name] = item

        # Sort by test name to keep a stable, diff-friendly ordering in the file.
        return sorted(merged.values(), key=lambda entry: entry['name'])

    def save_timings(self, test_results):
        """Merge timings of passed tests into timing_file and write it out."""
        # We only save tests that have passed - timings for failed tests
        # might be wrong (timeouts or early fails).
        new_timings = [{'name': t.name, 'time': t.time}
                       for t in test_results if t.status == 'Passed']
        merged_timings = self.get_merged_timings(new_timings)

        with open(self.timing_file, 'w') as f:
            # indent=1 is what the original's indent=True effectively produced
            # (bool is an int subclass); made explicit here.
            json.dump(merged_timings, f, indent=1)
if __name__ == '__main__': | if __name__ == '__main__': | ||||
main() | main() |