diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -137,7 +137,7 @@
         testdir = os.path.join("{}", "{}_{}").format(
             self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
         tmpdir_arg = ["--tmpdir={}".format(testdir)]
-        time0 = time.time()
+        start_time = time.time()
         process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                    universal_newlines=True,
                                    stdout=log_stdout,
@@ -156,7 +156,7 @@
             status = "Failed"

         return TestResult(self.test_num, self.test_case, testdir, status,
-                          time.time() - time0, stdout, stderr)
+                          time.time() - start_time, stdout, stderr)


 def on_ci():
@@ -271,14 +271,14 @@
         # in the all_scripts list. Accept the name with or without .py
         # extension.
         individual_tests = [
-            re.sub(r"\.py$", "", t) + ".py" for t in tests if not t.endswith('*')]
+            re.sub(r"\.py$", "", test) + ".py" for test in tests if not test.endswith('*')]
         test_list = []
-        for t in individual_tests:
-            if t in all_scripts:
-                test_list.append(t)
+        for test in individual_tests:
+            if test in all_scripts:
+                test_list.append(test)
             else:
                 print("{}WARNING!{} Test '{}' not found in full test list.".format(
-                    BOLD[1], BOLD[0], t))
+                    BOLD[1], BOLD[0], test))

         # Allow for wildcard at the end of the name, so a single input can
         # match multiple tests
@@ -290,18 +290,15 @@
         # do not cut off explicitly specified tests
         cutoff = sys.maxsize
     else:
-        # No individual tests have been specified.
-        # Run all tests that do not exceed
+        # Run base tests only
         test_list = all_scripts
-        cutoff = args.cutoff
-        if args.extended:
-            cutoff = sys.maxsize
+        cutoff = sys.maxsize if args.extended else args.cutoff

     # Remove the test cases that the user has explicitly asked to exclude.
     if args.exclude:
-        tests_excl = [re.sub(r"\.py$", "", t)
-                      + (".py" if ".py" not in t else "") for t in args.exclude.split(',')]
-        for exclude_test in tests_excl:
+        exclude_tests = [re.sub(r"\.py$", "", test)
+                         + (".py" if ".py" not in test else "") for test in args.exclude.split(',')]
+        for exclude_test in exclude_tests:
             if exclude_test in test_list:
                 test_list.remove(exclude_test)
             else:
@@ -396,10 +393,10 @@
             raise

     # Run Tests
-    time0 = time.time()
+    start_time = time.time()
     test_results = execute_test_processes(
         num_jobs, test_list, tests_dir, tmpdir, flags, failfast)
-    runtime = time.time() - time0
+    runtime = time.time() - start_time

     max_len_name = len(max(test_list, key=len))
     print_results(test_results, tests_dir, max_len_name,
@@ -542,7 +539,7 @@
     resultCollector.start()

     # Start some worker threads
-    for j in range(num_jobs):
+    for job in range(num_jobs):
         t = threading.Thread(target=handle_test_cases)
         t.daemon = True
         t.start()
@@ -559,7 +556,7 @@

     # Flush our queues so the threads exit
     update_queue.put(None)
-    for j in range(num_jobs):
+    for job in range(num_jobs):
         job_queue.put(None)

     return test_results
@@ -715,9 +712,10 @@
         params = test_params.get(test_name)
         if params is not None:
             tests_with_params.extend(
-                [test_name + " " + " ".join(p) for p in params])
+                [test_name + " " + " ".join(parameter) for parameter in params])

-    result = [t for t in tests_with_params if get_test_time(t) <= cutoff]
+    result = [
+        test for test in tests_with_params if get_test_time(test) <= cutoff]
     result.sort(key=lambda x: (-get_test_time(x), x))
     return result

@@ -777,8 +775,8 @@
         if not os.path.isfile(coverage_ref_filename):
             raise RuntimeError("No coverage reference found")

-        with open(coverage_ref_filename, 'r', encoding="utf8") as f:
-            all_cmds.update([i.strip() for i in f.readlines()])
+        with open(coverage_ref_filename, 'r', encoding="utf8") as file:
+            all_cmds.update([line.strip() for line in file.readlines()])

         for root, _, files in os.walk(self.dir):
             for filename in files:
@@ -786,8 +784,9 @@
                     coverage_filenames.add(os.path.join(root, filename))

         for filename in coverage_filenames:
-            with open(filename, 'r', encoding="utf8") as f:
-                covered_cmds.update([i.strip() for i in f.readlines()])
+            with open(filename, 'r', encoding="utf8") as file:
+                covered_cmds.update([line.strip()
+                                     for line in file.readlines()])

         return all_cmds - covered_cmds
@@ -840,8 +839,8 @@

     def load_timings(self):
         if os.path.isfile(self.timing_file):
-            with open(self.timing_file, encoding="utf8") as f:
-                return json.load(f)
+            with open(self.timing_file, encoding="utf8") as file:
+                return json.load(file)
         else:
             return []
@@ -867,13 +866,14 @@
     def save_timings(self, test_results):
         # we only save test that have passed - timings for failed test might be
         # wrong (timeouts or early fails)
-        passed_results = [t for t in test_results if t.status == 'Passed']
-        new_timings = list(map(lambda t: {'name': t.name, 'time': TimeResolution.seconds(t.time)},
+        passed_results = [
+            test for test in test_results if test.status == 'Passed']
+        new_timings = list(map(lambda test: {'name': test.name, 'time': TimeResolution.seconds(test.time)},
                                passed_results))
         merged_timings = self.get_merged_timings(new_timings)

-        with open(self.timing_file, 'w', encoding="utf8") as f:
-            json.dump(merged_timings, f, indent=True)
+        with open(self.timing_file, 'w', encoding="utf8") as file:
+            json.dump(merged_timings, file, indent=True)


 class TimeResolution: