test/functional/test_runner.py
@@ 131 lines not shown @@ def run(self, portseed_offset):
         portseed = self.test_num + portseed_offset
         portseed_arg = ["--portseed={}".format(portseed)]
         log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
         log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
         test_argv = self.test_case.split()
         testdir = os.path.join("{}", "{}_{}").format(
             self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
         tmpdir_arg = ["--tmpdir={}".format(testdir)]
-        time0 = time.time()
+        start_time = time.time()
         process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                    universal_newlines=True,
                                    stdout=log_stdout,
                                    stderr=log_stderr)
         process.wait()
         log_stdout.seek(0), log_stderr.seek(0)
         [stdout, stderr] = [log.read().decode('utf-8')
                             for log in (log_stdout, log_stderr)]
         log_stdout.close(), log_stderr.close()
         if process.returncode == TEST_EXIT_PASSED and stderr == "":
             status = "Passed"
         elif process.returncode == TEST_EXIT_SKIPPED:
             status = "Skipped"
         else:
             status = "Failed"
         return TestResult(self.test_num, self.test_case, testdir, status,
-                          time.time() - time0, stdout, stderr)
+                          time.time() - start_time, stdout, stderr)
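The run() method above buffers each child's output in a SpooledTemporaryFile, which stays in memory until it grows past max_size (64 KiB here) and only then spills to disk, and then classifies the run by exit code. A minimal standalone sketch of the same capture-and-classify pattern; the TEST_EXIT_* values are assumptions standing in for the framework's real constants:

import subprocess
import sys
import tempfile

# Assumption: these mirror the framework's TEST_EXIT_* constants.
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77

def run_and_classify(cmd):
    # Buffer the child's output in memory, spilling to disk past 64 KiB.
    with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stdout, \
            tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
        process = subprocess.Popen(cmd, universal_newlines=True,
                                   stdout=log_stdout, stderr=log_stderr)
        process.wait()
        log_stdout.seek(0)
        log_stderr.seek(0)
        stdout = log_stdout.read().decode('utf-8')
        stderr = log_stderr.read().decode('utf-8')
    # Any stderr output fails the run even when the exit code says "passed".
    if process.returncode == TEST_EXIT_PASSED and stderr == "":
        return "Passed", stdout
    if process.returncode == TEST_EXIT_SKIPPED:
        return "Skipped", stdout
    return "Failed", stdout + stderr

print(run_and_classify([sys.executable, "-c", "print('ok')"]))  # ('Passed', 'ok\n')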
 def on_ci():
     return os.getenv('TRAVIS') == 'true' or os.getenv(
         'TEAMCITY_VERSION') is not None
 def main():
@@ 98 lines not shown @@ for test in TEST_PARAMS:
"not been renamed or deleted".format(test)) | "not been renamed or deleted".format(test)) | ||||
sys.exit(1) | sys.exit(1) | ||||
if tests: | if tests: | ||||
# Individual tests have been specified. Run specified tests that exist | # Individual tests have been specified. Run specified tests that exist | ||||
# in the all_scripts list. Accept the name with or without .py | # in the all_scripts list. Accept the name with or without .py | ||||
# extension. | # extension. | ||||
individual_tests = [ | individual_tests = [ | ||||
re.sub(r"\.py$", "", t) + ".py" for t in tests if not t.endswith('*')] | re.sub(r"\.py$", "", test) + ".py" for test in tests if not test.endswith('*')] | ||||
test_list = [] | test_list = [] | ||||
for t in individual_tests: | for test in individual_tests: | ||||
if t in all_scripts: | if test in all_scripts: | ||||
test_list.append(t) | test_list.append(test) | ||||
else: | else: | ||||
print("{}WARNING!{} Test '{}' not found in full test list.".format( | print("{}WARNING!{} Test '{}' not found in full test list.".format( | ||||
BOLD[1], BOLD[0], t)) | BOLD[1], BOLD[0], test)) | ||||
# Allow for wildcard at the end of the name, so a single input can | # Allow for wildcard at the end of the name, so a single input can | ||||
# match multiple tests | # match multiple tests | ||||
for test in tests: | for test in tests: | ||||
if test.endswith('*'): | if test.endswith('*'): | ||||
test_list.extend( | test_list.extend( | ||||
[t for t in all_scripts if t.startswith(test[:-1])]) | [t for t in all_scripts if t.startswith(test[:-1])]) | ||||
# do not cut off explicitly specified tests | # do not cut off explicitly specified tests | ||||
cutoff = sys.maxsize | cutoff = sys.maxsize | ||||
else: | else: | ||||
# No individual tests have been specified. | # Run base tests only | ||||
# Run all tests that do not exceed | |||||
test_list = all_scripts | test_list = all_scripts | ||||
cutoff = args.cutoff | cutoff = sys.maxsize if args.extended else args.cutoff | ||||
if args.extended: | |||||
cutoff = sys.maxsize | |||||
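The selection logic accepts exact names with or without the .py extension plus trailing-* wildcards, and only non-explicit runs are subject to the runtime cutoff. A hedged sketch of the same rules against a made-up script list:

import re
import sys

all_scripts = ["wallet_basic.py", "wallet_hd.py", "mempool_limit.py"]  # made-up list

def select_tests(requested, extended=False, base_cutoff=40):
    """Mimic the runner's selection: exact names, trailing-* wildcards,
    and a cutoff that is lifted for explicit picks or extended runs."""
    if requested:
        # Normalize "wallet_basic" and "wallet_basic.py" to the same name.
        exact = [re.sub(r"\.py$", "", name) + ".py"
                 for name in requested if not name.endswith('*')]
        selected = [name for name in exact if name in all_scripts]
        # A trailing '*' matches every script sharing the prefix.
        for name in requested:
            if name.endswith('*'):
                selected.extend(
                    [s for s in all_scripts if s.startswith(name[:-1])])
        return selected, sys.maxsize  # never cut off explicitly chosen tests
    return list(all_scripts), (sys.maxsize if extended else base_cutoff)

print(select_tests(["wallet_*"]))  # both wallet tests, cutoff lifted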
     # Remove the test cases that the user has explicitly asked to exclude.
     if args.exclude:
-        tests_excl = [re.sub(r"\.py$", "", t)
-                      + (".py" if ".py" not in t else "") for t in args.exclude.split(',')]
-        for exclude_test in tests_excl:
+        exclude_tests = [re.sub(r"\.py$", "", test)
+                         + (".py" if ".py" not in test else "") for test in args.exclude.split(',')]
+        for exclude_test in exclude_tests:
             if exclude_test in test_list:
                 test_list.remove(exclude_test)
             else:
                 print("{}WARNING!{} Test '{}' not found in current test list.".format(
                     BOLD[1], BOLD[0], exclude_test))
     # Update timings from build_dir only if separate build directory is used.
     # We do not want to pollute source directory.
@@ 78 lines not shown @@ if len(test_list) > 1 and num_jobs > 1:
         try:
             subprocess.check_output([sys.executable, os.path.join(
                 tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)])
         except subprocess.CalledProcessError as e:
             sys.stdout.buffer.write(e.output)
             raise
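On a non-zero exit, check_output raises CalledProcessError with the child's captured output attached as e.output, so the runner can surface it before re-raising. The same pattern in a self-contained form, with a trivial placeholder child process:

import subprocess
import sys

def run_and_report(cmd):
    # check_output captures stdout; on failure those same bytes ride
    # along on the exception as e.output.
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        sys.stdout.buffer.write(e.output)  # show the child's output
        raise  # let the caller still see the original failure

run_and_report([sys.executable, "-c", "print('warming up cache')"])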
     # Run Tests
-    time0 = time.time()
+    start_time = time.time()
     test_results = execute_test_processes(
         num_jobs, test_list, tests_dir, tmpdir, flags, failfast)
-    runtime = time.time() - time0
+    runtime = time.time() - start_time
     max_len_name = len(max(test_list, key=len))
     print_results(test_results, tests_dir, max_len_name,
                   runtime, combined_logs_len)
     if junitoutput is not None:
         save_results_as_junit(
             test_results,
@@ 126 lines not shown @@ def execute_test_processes(
     ##
     # Start our result collection thread.
     resultCollector = threading.Thread(target=handle_update_messages)
     resultCollector.daemon = True
     resultCollector.start()
     # Start some worker threads
-    for j in range(num_jobs):
+    for job in range(num_jobs):
         t = threading.Thread(target=handle_test_cases)
         t.daemon = True
         t.start()
     # Push all our test cases into the job queue.
     for i, t in enumerate(test_list):
         job_queue.put(TestCase(i, t, tests_dir, tmpdir, failfast_event, flags))
     # Wait for all the jobs to be completed
     job_queue.join()
     # Wait for all the results to be compiled
     update_queue.join()
     # Flush our queues so the threads exit
     update_queue.put(None)
-    for j in range(num_jobs):
+    for job in range(num_jobs):
         job_queue.put(None)
     return test_results
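execute_test_processes fans work out through queues: workers pull TestCase objects from job_queue, a collector thread drains update_queue, and None sentinels shut everything down once both queues have joined. A stripped-down, single-queue sketch of the same pattern, with a squaring function standing in for TestCase.run:

import queue
import threading

def execute_jobs(items, num_jobs=4):
    job_queue = queue.Queue()
    results = []
    results_lock = threading.Lock()

    def worker():
        while True:
            item = job_queue.get()
            if item is None:  # sentinel: time to exit
                job_queue.task_done()
                return
            result = item * item  # stand-in for TestCase.run()
            with results_lock:
                results.append(result)
            job_queue.task_done()

    threads = [threading.Thread(target=worker, daemon=True)
               for _ in range(num_jobs)]
    for t in threads:
        t.start()
    for item in items:
        job_queue.put(item)
    job_queue.join()          # wait until every item is processed
    for _ in range(num_jobs):
        job_queue.put(None)   # one sentinel per worker to unblock it
    return results

print(sorted(execute_jobs(range(10))))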
 def print_results(test_results, tests_dir, max_len_name,
                   runtime, combined_logs_len):
     results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
@@ 139 lines not shown @@ def get_tests_to_run(test_list, test_params, cutoff, src_timings):
     # list.
     tests_with_params = []
     for test_name in test_list:
         # always execute a test without parameters
         tests_with_params.append(test_name)
         params = test_params.get(test_name)
         if params is not None:
             tests_with_params.extend(
-                [test_name + " " + " ".join(p) for p in params])
-    result = [t for t in tests_with_params if get_test_time(t) <= cutoff]
+                [test_name + " " + " ".join(parameter) for parameter in params])
+    result = [
+        test for test in tests_with_params if get_test_time(test) <= cutoff]
     result.sort(key=lambda x: (-get_test_time(x), x))
     return result
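The descending-time sort queues the slowest tests first, which keeps workers busy at the tail of a parallel run (the longest-processing-time heuristic), with ties broken alphabetically. A toy illustration with invented timings; the real runner reads these from its saved timing file, and the default of 0 for unknown tests is an assumption for the sketch:

# Made-up name -> seconds mapping standing in for the timing file.
timings = {"feature_big.py": 120, "wallet_basic.py": 30, "rpc_misc.py": 5}

def get_test_time(name):
    return timings.get(name, 0)  # assumed: unmeasured tests sort last

tests = ["rpc_misc.py", "feature_big.py", "wallet_basic.py"]
tests.sort(key=lambda x: (-get_test_time(x), x))
print(tests)  # ['feature_big.py', 'wallet_basic.py', 'rpc_misc.py']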
 class RPCCoverage():
     """
     Coverage reporting utilities for test_runner.
@@ 43 lines not shown @@ def _get_uncovered_rpc_commands(self):
         coverage_ref_filename = os.path.join(self.dir, reference_filename)
         coverage_filenames = set()
         all_cmds = set()
         covered_cmds = set()
         if not os.path.isfile(coverage_ref_filename):
             raise RuntimeError("No coverage reference found")
-        with open(coverage_ref_filename, 'r', encoding="utf8") as f:
-            all_cmds.update([i.strip() for i in f.readlines()])
+        with open(coverage_ref_filename, 'r', encoding="utf8") as file:
+            all_cmds.update([line.strip() for line in file.readlines()])
         for root, _, files in os.walk(self.dir):
             for filename in files:
                 if filename.startswith(coverage_file_prefix):
                     coverage_filenames.add(os.path.join(root, filename))
         for filename in coverage_filenames:
-            with open(filename, 'r', encoding="utf8") as f:
-                covered_cmds.update([i.strip() for i in f.readlines()])
+            with open(filename, 'r', encoding="utf8") as file:
+                covered_cmds.update([line.strip()
                                      for line in file.readlines()])
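The uncovered-RPC report reduces to a set difference: every command named in the reference file, minus every command seen in the per-node coverage logs collected during the run. A minimal in-memory sketch of that computation, using invented command names in place of the files:

# Reference list of all RPC commands (one per line in the real file).
all_cmds = {line.strip()
            for line in "getblock\ngetrawtransaction\nstop".splitlines()}
# Commands actually exercised, gathered from coverage logs.
covered_cmds = {"getblock"}
print(sorted(all_cmds - covered_cmds))  # ['getrawtransaction', 'stop']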
 def save_results_as_junit(test_results, file_name, time, test_suite_name):
     """
     Save tests results to file in JUnit format
@@ 36 lines not shown @@ class Timings():
""" | """ | ||||
def __init__(self, timing_file): | def __init__(self, timing_file): | ||||
self.timing_file = timing_file | self.timing_file = timing_file | ||||
self.existing_timings = self.load_timings() | self.existing_timings = self.load_timings() | ||||
def load_timings(self): | def load_timings(self): | ||||
if os.path.isfile(self.timing_file): | if os.path.isfile(self.timing_file): | ||||
with open(self.timing_file, encoding="utf8") as f: | with open(self.timing_file, encoding="utf8") as file: | ||||
return json.load(f) | return json.load(file) | ||||
else: | else: | ||||
return [] | return [] | ||||
def get_merged_timings(self, new_timings): | def get_merged_timings(self, new_timings): | ||||
""" | """ | ||||
Return new list containing existing timings updated with new timings | Return new list containing existing timings updated with new timings | ||||
Tests that do not exists are not removed | Tests that do not exists are not removed | ||||
""" | """ | ||||
Show All 9 Lines | def get_merged_timings(self, new_timings): | ||||
         # Sort the result to preserve test ordering in file
         merged = list(merged.values())
         merged.sort(key=lambda t, key=key: t[key])
         return merged
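The merge keys both timing lists on the test name, so a fresh measurement replaces a stale entry while unmatched old entries survive, exactly as the docstring above promises. A sketch of that dict-merge under the assumption that entries are {'name': ..., 'time': ...} objects, which the save_timings code below supports:

def merge_timings(existing_timings, new_timings, key='name'):
    # Index both lists by test name; later updates win.
    merged = {entry[key]: entry for entry in existing_timings}
    merged.update({entry[key]: entry for entry in new_timings})
    # Sort to keep the file ordering stable across runs.
    result = list(merged.values())
    result.sort(key=lambda entry: entry[key])
    return result

old = [{'name': 'a.py', 'time': 10}, {'name': 'b.py', 'time': 20}]
new = [{'name': 'b.py', 'time': 25}]
print(merge_timings(old, new))
# [{'name': 'a.py', 'time': 10}, {'name': 'b.py', 'time': 25}]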
     def save_timings(self, test_results):
         # we only save tests that have passed - timings for failed tests
         # might be wrong (timeouts or early fails)
-        passed_results = [t for t in test_results if t.status == 'Passed']
-        new_timings = list(map(lambda t: {'name': t.name, 'time': TimeResolution.seconds(t.time)},
+        passed_results = [
+            test for test in test_results if test.status == 'Passed']
+        new_timings = list(map(lambda test: {'name': test.name, 'time': TimeResolution.seconds(test.time)},
                                passed_results))
         merged_timings = self.get_merged_timings(new_timings)
-        with open(self.timing_file, 'w', encoding="utf8") as f:
-            json.dump(merged_timings, f, indent=True)
+        with open(self.timing_file, 'w', encoding="utf8") as file:
+            json.dump(merged_timings, file, indent=True)
 class TimeResolution:
     @staticmethod
     def seconds(time_fractional_second):
         return round(time_fractional_second)
     @staticmethod
     def milliseconds(time_fractional_second):
         return round(time_fractional_second, 3)
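Both helpers are thin wrappers around round(); a few worked values, keeping in mind that Python's round() applies banker's rounding to exact halves:

# With the TimeResolution class above in scope:
print(TimeResolution.seconds(12.7))           # 13
print(TimeResolution.seconds(12.5))           # 12, not 13: banker's rounding
print(TimeResolution.milliseconds(1.23456))   # 1.235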
 if __name__ == '__main__':
     main()