Changeset View
Changeset View
Standalone View
Standalone View
test/functional/test_runner.py
Show All 21 Lines | |||||
import time | import time | ||||
import shutil | import shutil | ||||
import signal | import signal | ||||
import sys | import sys | ||||
import subprocess | import subprocess | ||||
import tempfile | import tempfile | ||||
import re | import re | ||||
import logging | import logging | ||||
import xml.etree.ElementTree as ET | |||||
# Formatting. Default colors to empty strings. | # Formatting. Default colors to empty strings. | ||||
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") | BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") | ||||
try: | try: | ||||
# Make sure python thinks it can write unicode to its stdout | # Make sure python thinks it can write unicode to its stdout | ||||
"\u2713".encode("utf_8").decode(sys.stdout.encoding) | "\u2713".encode("utf_8").decode(sys.stdout.encoding) | ||||
TICK = "✓ " | TICK = "✓ " | ||||
CROSS = "✖ " | CROSS = "✖ " | ||||
▲ Show 20 Lines • Show All 126 Lines • ▼ Show 20 Lines | |||||
] | ] | ||||
def on_ci(): | def on_ci(): | ||||
return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') != None | return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') != None | ||||
def main(): | def main(): | ||||
# Read config generated by configure. | |||||
config = configparser.ConfigParser() | |||||
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini" | |||||
config.read_file(open(configfile)) | |||||
src_dir = config["environment"]["SRCDIR"] | |||||
tests_dir = src_dir + '/test/functional/' | |||||
# Parse arguments and pass through unrecognised args | # Parse arguments and pass through unrecognised args | ||||
parser = argparse.ArgumentParser(add_help=False, | parser = argparse.ArgumentParser(add_help=False, | ||||
usage='%(prog)s [test_runner.py options] [script options] [scripts]', | usage='%(prog)s [test_runner.py options] [script options] [scripts]', | ||||
description=__doc__, | description=__doc__, | ||||
epilog=''' | epilog=''' | ||||
Help text and arguments for individual test script:''', | Help text and arguments for individual test script:''', | ||||
formatter_class=argparse.RawTextHelpFormatter) | formatter_class=argparse.RawTextHelpFormatter) | ||||
parser.add_argument('--coverage', action='store_true', | parser.add_argument('--coverage', action='store_true', | ||||
Show All 9 Lines | def main(): | ||||
parser.add_argument('--jobs', '-j', type=int, default=4, | parser.add_argument('--jobs', '-j', type=int, default=4, | ||||
help='how many test scripts to run in parallel. Default=4.') | help='how many test scripts to run in parallel. Default=4.') | ||||
parser.add_argument('--keepcache', '-k', action='store_true', | parser.add_argument('--keepcache', '-k', action='store_true', | ||||
help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.') | help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.') | ||||
parser.add_argument('--quiet', '-q', action='store_true', | parser.add_argument('--quiet', '-q', action='store_true', | ||||
help='only print results summary and failure logs') | help='only print results summary and failure logs') | ||||
parser.add_argument('--tmpdirprefix', '-t', | parser.add_argument('--tmpdirprefix', '-t', | ||||
default=tempfile.gettempdir(), help="Root directory for datadirs") | default=tempfile.gettempdir(), help="Root directory for datadirs") | ||||
parser.add_argument('--junitouput', '-ju', | |||||
default=tests_dir + 'junit_results.xml', help="file that will store JUnit formated test results ") | |||||
args, unknown_args = parser.parse_known_args() | args, unknown_args = parser.parse_known_args() | ||||
# Create a set to store arguments and create the passon string | # Create a set to store arguments and create the passon string | ||||
tests = set(arg for arg in unknown_args if arg[:2] != "--") | tests = set(arg for arg in unknown_args if arg[:2] != "--") | ||||
passon_args = [arg for arg in unknown_args if arg[:2] == "--"] | passon_args = [arg for arg in unknown_args if arg[:2] == "--"] | ||||
# Read config generated by configure. | |||||
config = configparser.ConfigParser() | |||||
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini" | |||||
config.read_file(open(configfile)) | |||||
passon_args.append("--configfile=%s" % configfile) | passon_args.append("--configfile=%s" % configfile) | ||||
# Set up logging | # Set up logging | ||||
logging_level = logging.INFO if args.quiet else logging.DEBUG | logging_level = logging.INFO if args.quiet else logging.DEBUG | ||||
logging.basicConfig(format='%(message)s', level=logging_level) | logging.basicConfig(format='%(message)s', level=logging_level) | ||||
# Create base test directory | # Create base test directory | ||||
tmpdir = "%s/bitcoin_test_runner_%s" % ( | tmpdir = "%s/bitcoin_test_runner_%s" % ( | ||||
▲ Show 20 Lines • Show All 48 Lines • ▼ Show 20 Lines | if not test_list: | ||||
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") | "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") | ||||
sys.exit(0) | sys.exit(0) | ||||
if args.help: | if args.help: | ||||
# Print help for test_runner.py, then print help of the first script | # Print help for test_runner.py, then print help of the first script | ||||
# and exit. | # and exit. | ||||
parser.print_help() | parser.print_help() | ||||
subprocess.check_call( | subprocess.check_call( | ||||
(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0]).split() + ['-h']) | (tests_dir + test_list[0]).split() + ['-h']) | ||||
sys.exit(0) | sys.exit(0) | ||||
check_script_list(config["environment"]["SRCDIR"]) | check_script_list(src_dir) | ||||
if not args.keepcache: | if not args.keepcache: | ||||
shutil.rmtree("%s/test/cache" % | shutil.rmtree("%s/test/cache" % | ||||
config["environment"]["BUILDDIR"], ignore_errors=True) | config["environment"]["BUILDDIR"], ignore_errors=True) | ||||
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], | run_tests(test_list, src_dir, config["environment"]["BUILDDIR"], tests_dir, args.junitouput, | ||||
config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args) | config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args) | ||||
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]): | def run_tests(test_list, src_dir, build_dir, tests_dir, junitouput, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]): | ||||
# Warn if bitcoind is already running (unix only) | # Warn if bitcoind is already running (unix only) | ||||
try: | try: | ||||
pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | ||||
if pidofOutput is not None and pidofOutput != b'': | if pidofOutput is not None and pidofOutput != b'': | ||||
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % ( | print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % ( | ||||
BOLD[1], BOLD[0])) | BOLD[1], BOLD[0])) | ||||
except (OSError, subprocess.SubprocessError): | except (OSError, subprocess.SubprocessError): | ||||
pass | pass | ||||
# Warn if there is a cache directory | # Warn if there is a cache directory | ||||
cache_dir = "%s/test/cache" % build_dir | cache_dir = "%s/test/cache" % build_dir | ||||
if os.path.isdir(cache_dir): | if os.path.isdir(cache_dir): | ||||
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % ( | print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % ( | ||||
BOLD[1], BOLD[0], cache_dir)) | BOLD[1], BOLD[0], cache_dir)) | ||||
# Set env vars | # Set env vars | ||||
if "BITCOIND" not in os.environ: | if "BITCOIND" not in os.environ: | ||||
os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext | os.environ["BITCOIND"] = build_dir + '/src/bitcoind' + exeext | ||||
os.environ["BITCOINCLI"] = build_dir + '/src/bitcoin-cli' + exeext | os.environ["BITCOINCLI"] = build_dir + '/src/bitcoin-cli' + exeext | ||||
tests_dir = src_dir + '/test/functional/' | |||||
flags = ["--srcdir={}/src".format(build_dir)] + args | flags = ["--srcdir={}/src".format(build_dir)] + args | ||||
flags.append("--cachedir=%s" % cache_dir) | flags.append("--cachedir=%s" % cache_dir) | ||||
if enable_coverage: | if enable_coverage: | ||||
coverage = RPCCoverage() | coverage = RPCCoverage() | ||||
flags.append(coverage.flag) | flags.append(coverage.flag) | ||||
logging.debug("Initializing coverage directory at %s" % coverage.dir) | logging.debug("Initializing coverage directory at %s" % coverage.dir) | ||||
else: | else: | ||||
coverage = None | coverage = None | ||||
if len(test_list) > 1 and jobs > 1: | if len(test_list) > 1 and jobs > 1: | ||||
# Populate cache | # Populate cache | ||||
subprocess.check_output( | subprocess.check_output( | ||||
[tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir]) | [tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir]) | ||||
# Run Tests | # Run Tests | ||||
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags) | job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags) | ||||
time0 = time.time() | time0 = time.time() | ||||
test_results = [] | test_results = [] | ||||
max_len_name = len(max(test_list, key=len)) | max_len_name = len(max(test_list, key=len)) | ||||
for _ in range(len(test_list)): | for _ in range(len(test_list)): | ||||
test_result, stdout, stderr = job_queue.get_next() | test_result = job_queue.get_next() | ||||
test_results.append(test_result) | test_results.append(test_result) | ||||
if test_result.status == "Passed": | if test_result.status == "Passed": | ||||
logging.debug("\n%s%s%s passed, Duration: %s s" % ( | logging.debug("\n%s%s%s passed, Duration: %s s" % ( | ||||
BOLD[1], test_result.name, BOLD[0], test_result.time)) | BOLD[1], test_result.name, BOLD[0], test_result.time)) | ||||
elif test_result.status == "Skipped": | elif test_result.status == "Skipped": | ||||
logging.debug("\n%s%s%s skipped" % | logging.debug("\n%s%s%s skipped" % | ||||
(BOLD[1], test_result.name, BOLD[0])) | (BOLD[1], test_result.name, BOLD[0])) | ||||
else: | else: | ||||
print("\n%s%s%s failed, Duration: %s s\n" % | print("\n%s%s%s failed, Duration: %s s\n" % | ||||
(BOLD[1], test_result.name, BOLD[0], test_result.time)) | (BOLD[1], test_result.name, BOLD[0], test_result.time)) | ||||
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n') | print(BOLD[1] + 'stdout:\n' + BOLD[0] + test_result.stdout + '\n') | ||||
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n') | print(BOLD[1] + 'stderr:\n' + BOLD[0] + test_result.stderr + '\n') | ||||
print_results(test_results, max_len_name, (int(time.time() - time0))) | runtime = int(time.time() - time0) | ||||
print_results(test_results, max_len_name, runtime) | |||||
save_results_as_junit(test_results, junitouput, runtime) | |||||
if coverage: | if coverage: | ||||
coverage.report_rpc_coverage() | coverage.report_rpc_coverage() | ||||
logging.debug("Cleaning up coverage data") | logging.debug("Cleaning up coverage data") | ||||
coverage.cleanup() | coverage.cleanup() | ||||
# Clear up the temp directory if all subdirectories are gone | # Clear up the temp directory if all subdirectories are gone | ||||
▲ Show 20 Lines • Show All 87 Lines • ▼ Show 20 Lines | def get_next(self): | ||||
status = "Passed" | status = "Passed" | ||||
elif proc.returncode == TEST_EXIT_SKIPPED: | elif proc.returncode == TEST_EXIT_SKIPPED: | ||||
status = "Skipped" | status = "Skipped" | ||||
else: | else: | ||||
status = "Failed" | status = "Failed" | ||||
self.num_running -= 1 | self.num_running -= 1 | ||||
self.jobs.remove(j) | self.jobs.remove(j) | ||||
return TestResult(name, status, int(time.time() - time0)), stdout, stderr | return TestResult(name, status, int(time.time() - time0), stdout, stderr) | ||||
print('.', end='', flush=True) | print('.', end='', flush=True) | ||||
class TestResult(): | class TestResult(): | ||||
def __init__(self, name, status, time): | def __init__(self, name, status, time, stdout, stderr): | ||||
self.name = name | self.name = name | ||||
self.status = status | self.status = status | ||||
self.time = time | self.time = time | ||||
self.padding = 0 | self.padding = 0 | ||||
self.stdout = stdout | |||||
self.stderr = stderr | |||||
def __repr__(self): | def __repr__(self): | ||||
if self.status == "Passed": | if self.status == "Passed": | ||||
color = BLUE | color = BLUE | ||||
glyph = TICK | glyph = TICK | ||||
elif self.status == "Failed": | elif self.status == "Failed": | ||||
color = RED | color = RED | ||||
glyph = CROSS | glyph = CROSS | ||||
▲ Show 20 Lines • Show All 84 Lines • ▼ Show 20 Lines | def _get_uncovered_rpc_commands(self): | ||||
for filename in coverage_filenames: | for filename in coverage_filenames: | ||||
with open(filename, 'r') as f: | with open(filename, 'r') as f: | ||||
covered_cmds.update([i.strip() for i in f.readlines()]) | covered_cmds.update([i.strip() for i in f.readlines()]) | ||||
return all_cmds - covered_cmds | return all_cmds - covered_cmds | ||||
def save_results_as_junit(test_results, file_name, time):
    """
    Save test results to a file in JUnit XML format.

    See http://llg.cubic.org/docs/junit/ for specification of format.

    test_results: iterable of TestResult-like objects exposing the
        attributes name, status, time, stdout and stderr. status is one
        of "Passed", "Skipped" or "Failed".
    file_name: path of the XML file to write (overwritten if present).
    time: total wall-clock runtime of the whole suite, in seconds.
        NOTE(review): this parameter shadows the imported `time` module
        inside this function; kept as-is for caller compatibility.
    """
    # Tally statuses in a single pass instead of materializing one
    # throwaway list per status.
    num_failed = sum(1 for t in test_results if t.status == "Failed")
    num_skipped = sum(1 for t in test_results if t.status == "Skipped")

    e_test_suite = ET.Element("testsuite",
                              {"name": "bitcoin_abc_tests",
                               "tests": str(len(test_results)),
                               "failures": str(num_failed),
                               "id": "0",
                               "skipped": str(num_skipped),
                               "time": str(time),
                               "timestamp": datetime.datetime.now().isoformat('T')
                               })

    for test_result in test_results:
        e_test_case = ET.SubElement(e_test_suite, "testcase",
                                    {"name": test_result.name,
                                     "classname": test_result.name,
                                     "time": str(test_result.time)
                                     }
                                    )
        # JUnit marks non-passing cases with a child element; passed
        # tests get no special element.
        if test_result.status == "Skipped":
            ET.SubElement(e_test_case, "skipped")
        elif test_result.status == "Failed":
            ET.SubElement(e_test_case, "failure")

        # Attach captured output so CI viewers can show per-test logs.
        ET.SubElement(e_test_case, "system-out").text = test_result.stdout
        ET.SubElement(e_test_case, "system-err").text = test_result.stderr

    ET.ElementTree(e_test_suite).write(
        file_name, "UTF-8", xml_declaration=True)
if __name__ == '__main__': | if __name__ == '__main__': | ||||
main() | main() |