diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -27,6 +27,7 @@
 import tempfile
 import re
 import logging
+import xml.etree.ElementTree as ET
 
 # Formatting. Default colors to empty strings.
 BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
@@ -169,6 +170,14 @@
 
 
 def main():
+    # Read config generated by configure.
+    config = configparser.ConfigParser()
+    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
+    config.read_file(open(configfile))
+
+    src_dir = config["environment"]["SRCDIR"]
+    tests_dir = src_dir + '/test/functional/'
+
     # Parse arguments and pass through unrecognised args
     parser = argparse.ArgumentParser(add_help=False,
                                      usage='%(prog)s [test_runner.py options] [script options] [scripts]',
@@ -194,17 +203,13 @@
                         help='only print results summary and failure logs')
     parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(),
                         help="Root directory for datadirs")
+    parser.add_argument('--junitoutput', '-ju',
+                        default=tests_dir + 'junit_results.xml', help="file that will store JUnit formatted test results")
     args, unknown_args = parser.parse_known_args()
 
     # Create a set to store arguments and create the passon string
     tests = set(arg for arg in unknown_args if arg[:2] != "--")
     passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
-
-    # Read config generated by configure.
-    config = configparser.ConfigParser()
-    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
-    config.read_file(open(configfile))
-
     passon_args.append("--configfile=%s" % configfile)
 
     # Set up logging
@@ -269,20 +274,20 @@
         # and exit.
         parser.print_help()
         subprocess.check_call(
-            (config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0]).split() + ['-h'])
+            (tests_dir + test_list[0]).split() + ['-h'])
         sys.exit(0)
 
-    check_script_list(config["environment"]["SRCDIR"])
+    check_script_list(src_dir)
 
     if not args.keepcache:
         shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"],
                       ignore_errors=True)
 
-    run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"],
+    run_tests(test_list, src_dir, config["environment"]["BUILDDIR"], tests_dir, args.junitoutput,
               config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args)
 
 
-def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]):
+def run_tests(test_list, src_dir, build_dir, tests_dir, junitoutput, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]):
     # Warn if bitcoind is already running (unix only)
     try:
         pidofOutput = subprocess.check_output(["pidof", "bitcoind"])
@@ -303,8 +308,6 @@
     os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext
     os.environ["BITCOINCLI"] = build_dir + '/src/bitcoin-cli' + exeext
 
-    tests_dir = src_dir + '/test/functional/'
-
     flags = ["--srcdir={}/src".format(build_dir)] + args
     flags.append("--cachedir=%s" % cache_dir)
 
@@ -328,7 +331,7 @@
     max_len_name = len(max(test_list, key=len))
 
     for _ in range(len(test_list)):
-        test_result, stdout, stderr = job_queue.get_next()
+        test_result = job_queue.get_next()
         test_results.append(test_result)
 
         if test_result.status == "Passed":
@@ -340,10 +343,12 @@
         else:
             print("\n%s%s%s failed, Duration: %s s\n" % (
                 BOLD[1], test_result.name, BOLD[0], test_result.time))
-            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
-            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
+            print(BOLD[1] + 'stdout:\n' + BOLD[0] + test_result.stdout + '\n')
+            print(BOLD[1] + 'stderr:\n' + BOLD[0] + test_result.stderr + '\n')
 
-    print_results(test_results, max_len_name, (int(time.time() - time0)))
+    runtime = int(time.time() - time0)
+    print_results(test_results, max_len_name, runtime)
+    save_results_as_junit(test_results, junitoutput, runtime)
 
     if coverage:
         coverage.report_rpc_coverage()
@@ -447,16 +452,18 @@
                     self.num_running -= 1
                     self.jobs.remove(j)
 
-                    return TestResult(name, status, int(time.time() - time0)), stdout, stderr
+                    return TestResult(name, status, int(time.time() - time0), stdout, stderr)
             print('.', end='', flush=True)
 
 
 class TestResult():
-    def __init__(self, name, status, time):
+    def __init__(self, name, status, time, stdout, stderr):
        self.name = name
        self.status = status
        self.time = time
        self.padding = 0
+       self.stdout = stdout
+       self.stderr = stderr
 
     def __repr__(self):
         if self.status == "Passed":
@@ -557,5 +564,42 @@
     return all_cmds - covered_cmds
 
 
+def save_results_as_junit(test_results, file_name, time):
+    """
+    Save test results to file in JUnit format
+
+    See http://llg.cubic.org/docs/junit/ for specification of format
+    """
+    e_test_suite = ET.Element("testsuite",
+                              {"name": "bitcoin_abc_tests",
+                               "tests": str(len(test_results)),
+                               # "errors":
+                               "failures": str(len([t for t in test_results if t.status == "Failed"])),
+                               "id": "0",
+                               "skipped": str(len([t for t in test_results if t.status == "Skipped"])),
+                               "time": str(time),
+                               "timestamp": datetime.datetime.now().isoformat('T')
+                               })
+
+    for test_result in test_results:
+        e_test_case = ET.SubElement(e_test_suite, "testcase",
+                                    {"name": test_result.name,
+                                     "classname": test_result.name,
+                                     "time": str(test_result.time)
+                                     }
+                                    )
+        if test_result.status == "Skipped":
+            ET.SubElement(e_test_case, "skipped")
+        elif test_result.status == "Failed":
+            ET.SubElement(e_test_case, "failure")
+        # no special element for passed tests
+
+        ET.SubElement(e_test_case, "system-out").text = test_result.stdout
+        ET.SubElement(e_test_case, "system-err").text = test_result.stderr
+
+    ET.ElementTree(e_test_suite).write(
+        file_name, "UTF-8", xml_declaration=True)
+
+
 if __name__ == '__main__':
     main()
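
As a rough standalone illustration (not part of the patch), the file written by save_results_as_junit can be read back with the same xml.etree.ElementTree module to summarise a run. The path below is only an assumption matching the default that the new --junitoutput option points at; adjust it to wherever the file was actually written.

    #!/usr/bin/env python3
    # Sketch: summarise a junit_results.xml produced by save_results_as_junit.
    # Assumes the runner wrote to the default --junitoutput location.
    import xml.etree.ElementTree as ET

    suite = ET.parse("test/functional/junit_results.xml").getroot()
    print("%s: %s tests, %s failed, %s skipped, %s s" % (
        suite.get("name"), suite.get("tests"), suite.get("failures"),
        suite.get("skipped"), suite.get("time")))
    for case in suite.findall("testcase"):
        # The skipped/failure child elements mirror what save_results_as_junit emits.
        if case.find("failure") is not None:
            status = "Failed"
        elif case.find("skipped") is not None:
            status = "Skipped"
        else:
            status = "Passed"
        print("%-60s %-8s %s s" % (case.get("name"), status, case.get("time")))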