test/functional/test_runner.py
@@ ... 29 lines skipped ... @@
 import xml.etree.ElementTree as ET
 import json
 import threading
 import multiprocessing
 import importlib
 import importlib.util
 import inspect
 import multiprocessing as mp
+import random
 from queue import Full, Empty
 from io import StringIO
 from test_framework.test_framework import BitcoinTestFramework, ComparisonTestFramework, TestStatus
 from test_framework.cdefs import get_srcdir

 # Formatting. Default colors to empty strings.
 BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
@@ ... 40 lines skipped ... @@
 # Used to limit the number of tests when a list of tests is not provided on
 # the command line. When --extended is specified, we run all tests; otherwise
 # we only run a test if its execution time in seconds does not exceed the
 # extended cutoff.
 DEFAULT_EXTENDED_CUTOFF = 40
 DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
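For example, on an 8-core machine the default works out to three parallel jobs:

    >>> (8 // 3) + 1
    3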
-class TestCase():
+class TestStarted():
+    def __init__(self, test_file, test_name):
+        self.test_file = test_file
+        self.test_name = test_name
+
+
+class TestFile():
     """
-    Data structure to hold and run information necessary to launch a test case.
+    Data structure to hold and run information necessary to launch test cases.
     """

-    def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None):
+    def __init__(self, test_file, tests_dir, tmpdir):
         self.tests_dir = tests_dir
         self.tmpdir = tmpdir
-        self.test_case = test_case
-        self.test_num = test_num
-        self.flags = flags
-
-    def run(self, portseed_offset, run_tags):
-        t = self.test_case
-        portseed = self.test_num * 1000 + portseed_offset
-        portseed_arg = ["--portseed={}".format(portseed)]
-        test_argv = t.split()
-        tmpdir = [os.path.join("--tmpdir=%s", "%s_%s") %
-                  (self.tmpdir, re.sub(".py$", "", t), portseed)]
-        name = t
-        time0 = time.time()
+        self.test_file = test_file
+    def find_and_run_tests(self, update_queue, run_tags, base_flags):
+        param_sets = get_test_parameters(self.test_file, TEST_PARAMS)
         test_modulepath = None
         try:
             # Dynamically import the test so we can introspect it
             test_modulepath = os.path.abspath(
-                os.path.join(self.tests_dir, test_argv[0]))
+                os.path.join(self.tests_dir, self.test_file))
             test_spec = importlib.util.spec_from_file_location(
-                os.path.splitext(test_argv[0])[0], test_modulepath)
+                os.path.splitext(self.test_file)[0], test_modulepath)
             test_module = importlib.util.module_from_spec(test_spec)
             test_spec.loader.exec_module(test_module)
         except Exception as e:
-            return TestResult(name, "Failed", 0, "", str(e))
+            print("Test file failed to parse:")
+            print(e)
+            # Report the failure anyway; we don't want to lose test results in CI.
+            update_queue.put(TestStarted(self.test_file, self.test_file))
+            update_queue.put(TestResult(
+                self.test_file, "Failed", 0, "", str(e)))
+            return
-        try:
+        # Store our test cases before running them so we can do some
+        # accounting that keeps the old test names where applicable.
+        # We don't want to lose test results in CI.
+        test_cases = []
+        for prop in dir(test_module):
+            obj = getattr(test_module, prop)
+            if inspect.isclass(obj) and issubclass(obj, BitcoinTestFramework) and \
+                    obj is not BitcoinTestFramework and obj is not ComparisonTestFramework:
+                # Give every test the fast tag by default unless otherwise specified
+                tags = ["fast"]
+                if hasattr(obj, 'test_tags'):
+                    tags = obj.test_tags
+                if not compare_tags(tags, run_tags):
+                    continue
+                test_cases.append(obj)
+
+        for test_case in test_cases:
+            for param_set in param_sets:
+                test_instance = test_case()
+                # For compatibility with the old test printer.
+                # TODO: Update test result printing
+                legacy_name = " ".join(
+                    [self.test_file, test_case.__name__] + param_set)
+                # Use the old name if there's only one test in the file.
+                if len(test_cases) == 1:
+                    legacy_name = " ".join([self.test_file] + param_set)
+                update_queue.put(TestStarted(self.test_file, legacy_name))
+                test_result = self.run_test(
+                    test_instance, test_case.__name__, param_set, legacy_name,
+                    base_flags, run_tags)
+                update_queue.put(test_result)
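For illustration, the legacy naming rule above produces names like these (file, class, and flag names are invented):

    >>> " ".join(["wallet_hd.py", "WalletHDTest"] + ["--usecli"])
    'wallet_hd.py WalletHDTest --usecli'
    >>> " ".join(["wallet_hd.py"] + ["--usecli"])   # single test class in the file
    'wallet_hd.py --usecli'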
+    def run_test(self, test_instance, test_name, param_set, legacy_name,
+                 base_flags, run_tags):
+        time0 = time.time()
+        # Pick a pseudorandom port seed so parallel runs (and any zombie
+        # bitcoinds left over from earlier runs) are unlikely to collide.
+        portseed = random.randint(2**15, 2**16)

         # Setup output capturing
         original_stdout = sys.stdout
         original_stderr = sys.stderr
         test_stdout = StringIO()
         test_stderr = StringIO()
         sys.stdout = test_stdout
         sys.stderr = test_stderr

-        exit_code = None
-        for prop in dir(test_module):
-            obj = getattr(test_module, prop)
-            if inspect.isclass(obj) and issubclass(obj, BitcoinTestFramework) and obj is not BitcoinTestFramework and obj is not ComparisonTestFramework:
-                test_instance = obj()
-                # Give every test the fast tag by default unless otherwise specified
-                tags = ["fast"]
-                if hasattr(test_instance, 'test_tags'):
-                    tags = test_instance.test_tags
-                if compare_tags(tags, run_tags):
-                    exit_code = obj().main(
-                        test_argv[1:] + self.flags + portseed_arg + tmpdir)
-                else:
-                    exit_code = TestStatus.SKIPPED
+        exit_code = TestStatus.SKIPPED
+        # Build the argument list for this run without mutating param_set,
+        # which is shared across test cases.
+        run_dir = os.path.join(self.tmpdir, "_".join(
+            [re.sub(".py$", "", self.test_file), test_name] + param_set + [str(portseed)]))
+        run_flags = param_set + ["--portseed={}".format(portseed),
+                                 "--tmpdir=" + run_dir]
+        try:
+            # Use our argv. When we import tests, argparse expects this.
+            exit_code = test_instance.main(base_flags + run_flags)
         except Exception as e:
             print(e)
             exit_code = TestStatus.FAILED
         finally:
             sys.stdout = original_stdout
             sys.stderr = original_stderr

         [stdout, stderr] = [test_stdout.getvalue(), test_stderr.getvalue()]
         test_stdout.close(), test_stderr.close()

         if exit_code == TestStatus.PASSED and stderr == "":
             status = "Passed"
         elif exit_code == TestStatus.SKIPPED:
             status = "Skipped"
         else:
             status = "Failed"

-        return TestResult(name, status, int(time.time() - time0), stdout, stderr)
+        return TestResult(legacy_name, status, int(time.time() - time0), stdout, stderr)
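For illustration, the per-run --tmpdir path built above looks like this, assuming POSIX paths and invented values:

    >>> import os, re
    >>> name = "_".join([re.sub(".py$", "", "walletbackup.py"), "WalletBackupTest", "--usecli", "34567"])
    >>> os.path.join("/tmp/run", name)
    '/tmp/run/walletbackup_WalletBackupTest_--usecli_34567'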
 def on_ci():
     return os.getenv('TRAVIS') == 'true' or os.getenv('TEAMCITY_VERSION') is not None


 def main():
     # Read config generated by configure.
@@ ... 98 lines skipped ... @@ else:
         test_list = all_scripts

     # Remove the test cases that the user has explicitly asked to exclude.
     if args.exclude:
         for exclude_test in args.exclude.split(','):
             if exclude_test + ".py" in test_list:
                 test_list.remove(exclude_test + ".py")

-    # Add test parameters and remove long running tests if needed
-    test_list = get_tests_to_run(test_list, TEST_PARAMS)

     if not test_list:
         print("No valid test scripts specified. Check that your test is in one "
               "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
         sys.exit(0)

     if args.help:
         # Print help for test_runner.py, then print help of the first script
         # and exit.
@@ ... 49 lines skipped ... @@ if len(test_list) > 1 and num_jobs > 1:
             [os.path.join(tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir=%s", "cache") % tmpdir])

     # Run Tests
     time0 = time.time()
     test_results = execute_test_processes(
         num_jobs, test_list, tests_dir, tmpdir, flags, tags)
     runtime = int(time.time() - time0)

-    max_len_name = len(max(test_list, key=len))
+    max_len_name = len(max([result.name for result in test_results], key=len))
     print_results(test_results, max_len_name, runtime)
     save_results_as_junit(test_results, junitouput, runtime)

     if coverage:
         coverage.report_rpc_coverage()
         logging.debug("Cleaning up coverage data")
         coverage.cleanup()
@@ ... 11 lines skipped ... @@
 # Define some helper functions we will need for the worker processes.
 ##

 def handle_message(message, running_jobs, results_queue):
     """
-    handle_message handles a single message from handle_test_cases
+    handle_message handles a single message from a test worker.
     """
-    if isinstance(message, TestCase):
-        running_jobs.add(message.test_case)
-        print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
+    if isinstance(message, TestStarted):
+        running_jobs.add(message.test_name)
+        print("{}{}{} started".format(BOLD[1], message.test_name, BOLD[0]))
         return

     if isinstance(message, TestResult):
         test_result = message
         running_jobs.remove(test_result.name)
         results_queue.put(test_result)

         if test_result.status == "Passed":
             print("%s%s%s passed, Duration: %s s" % (
                 BOLD[1], test_result.name, BOLD[0], test_result.time))
@@ ... 36 lines skipped ... @@ while True:
         except Empty as e:
             if not on_ci():
                 print("Running jobs: {}".format(
                     ", ".join(running_jobs)), end="\r")
                 sys.stdout.flush()
                 printed_status = True
-def handle_test_cases(job_queue, update_queue, tags):
+def handle_test_files(job_queue, update_queue, tags, base_flags):
     """
-    job_runner represents a single thread that is part of a worker pool.
-    It waits for a test, then executes that test. It also reports start
-    and result messages to handle_update_messages.
+    handle_test_files represents a single process in the worker pool. It waits
+    for a test file, then runs every test case in that file, reporting start
+    and result messages to handle_update_messages.
     """
-    # In case there is a graveyard of zombie bitcoinds, we can apply a
-    # pseudorandom offset to hopefully jump over them.
-    # (625 is PORT_RANGE/MAX_NODES)
-    portseed_offset = int(time.time() * 1000) % 625
     while True:
         test = job_queue.get()
         if test is None:
             break

-        # Signal that the test is starting to inform the poor waiting
-        # programmer
-        update_queue.put(test)
-        result = test.run(portseed_offset, tags)
-        update_queue.put(result)
+        # find_and_run_tests signals each test start and result to inform
+        # the poor waiting programmer.
+        test.find_and_run_tests(update_queue, tags, base_flags)
         job_queue.task_done()
 def compare_tags(test_tags, run_tags):
     """
     Compare two sets of tags. Tags are evaluated in order, so if an
     include is specified after an exclusion, we will still run the test.
     """
@@ ... 22 lines skipped ... @@
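The body of compare_tags is collapsed above. Purely as a hypothetical sketch of the order-sensitive rule the docstring describes (assuming a "-tag" exclusion syntax; the real implementation and the run_tags format may differ):

    def compare_tags_sketch(test_tags, run_tags):
        run = False
        for tag in run_tags:
            exclude = tag.startswith("-")
            if tag.lstrip("-") in test_tags:
                # The last matching directive wins.
                run = not exclude
        return run

    >>> compare_tags_sketch(["fast"], ["-fast", "fast"])  # include after exclude
    True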
@@ def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags, tags): @@
     ##
     # Start our result collection process.
     t = ctx.Process(target=handle_update_messages,
                     args=(update_queue, results_queue,))
     t.start()

     # Start some worker processes
     for j in range(num_jobs):
-        t = ctx.Process(target=handle_test_cases,
-                        args=(job_queue, update_queue, tags,))
+        t = ctx.Process(target=handle_test_files,
+                        args=(job_queue, update_queue, tags, flags,))
         t.start()

-    # Push all our test cases into the job queue.
-    for i, t in enumerate(test_list):
-        job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags))
+    # Push all our test files into the job queue.
+    for t in test_list:
+        job_queue.put(TestFile(t, tests_dir, tmpdir))

     # Wait for all the jobs to be completed
     job_queue.join()

     # Wait for all the results to be compiled
     update_queue.join()

     # Flush our queues so the worker processes exit
@@ ... 60 lines skipped ... @@
 def get_all_scripts_from_disk(test_dir, non_scripts):
     """
     Return all available test scripts from the script directory (excluding NON_SCRIPTS)
     """
     python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"])
     return list(python_files - set(non_scripts))
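For example, with an invented directory listing and exclusion set:

    >>> files = ["abc_feature.py", "notes.txt", "combine_logs.py"]
    >>> sorted(set(t for t in files if t[-3:] == ".py") - {"combine_logs.py"})
    ['abc_feature.py']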
-def get_tests_to_run(test_list, test_params):
+def get_test_parameters(test_file, test_params):
     """
-    Returns all combinations of tests with testing flags
+    Return every parameter set that test_file should be run with.
     """
     # Some tests must also be run with additional parameters. Add them to the list.
-    tests_with_params = []
-    for test_name in test_list:
-        # always execute a test without parameters
-        tests_with_params.append(test_name)
-        params = test_params.get(test_name)
-        if params is not None:
-            tests_with_params.extend(
-                [test_name + " " + " ".join(p) for p in params])
-    return tests_with_params
+    # Always execute a test without parameters.
+    param_sets = [[]]
+    additional_params = test_params.get(test_file)
+    if additional_params is not None:
+        param_sets.extend(additional_params)
+    return param_sets
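For illustration, with an invented parameter table (the real TEST_PARAMS is defined elsewhere in this file):

    >>> example_params = {"wallet_hd.py": [["--usecli"]]}
    >>> get_test_parameters("wallet_hd.py", example_params)
    [[], ['--usecli']]
    >>> get_test_parameters("other_test.py", example_params)
    [[]]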
 class RPCCoverage():
     """
     Coverage reporting utilities for test_runner.

     Coverage calculation works by having each test script subprocess write
     coverage files into a particular directory. These files contain the RPC
@@ ... 101 lines skipped ... @@