diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -113,6 +113,7 @@
         self.options.tmpdir = tempfile.mkdtemp(prefix="test")
         self._start_logging()
+        print(self.options.tmpdir)
 
         success = TestStatus.FAILED
 
         try:
@@ -463,6 +464,7 @@
                 self.options.cachedir, i, "fee_estimates.dat"))
 
         for i in range(self.num_nodes):
+            print("Initializing test directory", self.options.tmpdir)
             from_dir = os.path.join(self.options.cachedir, "node" + str(i))
             to_dir = os.path.join(self.options.tmpdir, "node" + str(i))
             shutil.copytree(from_dir, to_dir)
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -35,6 +35,7 @@
 import importlib.util
 import inspect
 import multiprocessing as mp
+import random
 from queue import Full, Empty
 from io import StringIO
@@ -91,67 +92,90 @@
 DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
 
 
-class TestCase():
+class TestStarted():
+    def __init__(self, test_file, test_name):
+        self.test_file = test_file
+        self.test_name = test_name
+
+
+class TestFile():
     """
-    Data structure to hold and run information necessary to launch a test case.
+    Data structure to hold and run information necessary to launch test cases.
     """
 
-    def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None):
+    def __init__(self, test_file, tests_dir, tmpdir):
         self.tests_dir = tests_dir
         self.tmpdir = tmpdir
-        self.test_case = test_case
-        self.test_num = test_num
-        self.flags = flags
-
-    def run(self, portseed_offset, run_tags):
-        t = self.test_case
-        portseed = self.test_num * 1000 + portseed_offset
-        portseed_arg = ["--portseed={}".format(portseed)]
-        test_argv = t.split()
-        tmpdir = [os.path.join("--tmpdir=%s", "%s_%s") %
-                  (self.tmpdir, re.sub(".py$", "", t), portseed)]
-        name = t
-        time0 = time.time()
+        self.test_file = test_file
 
+    def find_and_run_tests(self, update_queue, run_tags, base_flags):
+        param_sets = get_test_parameters(self.test_file, TEST_PARAMS)
         test_modulepath = None
        try:
             # Dynamically import the test so we can introspect it
             test_modulepath = os.path.abspath(
-                os.path.join(self.tests_dir, test_argv[0]))
+                os.path.join(self.tests_dir, self.test_file))
             test_spec = importlib.util.spec_from_file_location(
-                os.path.splitext(test_argv[0])[0], test_modulepath)
+                os.path.splitext(self.test_file)[0], test_modulepath)
             test_module = importlib.util.module_from_spec(test_spec)
             test_spec.loader.exec_module(test_module)
         except Exception as e:
-            return TestResult(name, "Failed", 0, "", str(e))
+            print("Test file failed to parse:")
+            print(e)
+            # Surface the failure to the results collector instead of dropping it.
+            update_queue.put(TestStarted(self.test_file, self.test_file))
+            update_queue.put(TestResult(self.test_file, "Failed", 0, "", str(e)))
+            return
+
+        # Collect the test cases before running them so we can do some accounting
+        # and keep the old test names where applicable. We don't want to lose
+        # test results in CI.
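+        # Every BitcoinTestFramework subclass defined in the module (other than the
+        # framework base classes) becomes a runnable test case, filtered by its tags.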
+        test_cases = []
+        for prop in dir(test_module):
+            obj = getattr(test_module, prop)
+            if inspect.isclass(obj) and issubclass(obj, BitcoinTestFramework) and \
+                    obj is not BitcoinTestFramework and obj is not ComparisonTestFramework:
+                # Give every test the fast tag by default unless otherwise specified
+                tags = ["fast"]
+                if hasattr(obj, 'test_tags'):
+                    tags = obj.test_tags
+                if not compare_tags(tags, run_tags):
+                    continue
+                test_cases.append(obj)
+
+        for test_case in test_cases:
+            for param_set in param_sets:
+                test_instance = test_case()
+                # For compatibility with the old test printer.
+                # TODO: Update test result printing
+                legacy_name = " ".join(
+                    [self.test_file, test_case.__name__] + param_set)
+                # Use the old name if there's only one test in the file.
+                if len(test_cases) == 1:
+                    legacy_name = " ".join([self.test_file] + param_set)
+                update_queue.put(TestStarted(self.test_file, legacy_name))
+                test_result = self.run_test(
+                    test_instance, test_case.__name__, param_set, legacy_name, base_flags, run_tags)
+                update_queue.put(test_result)
+
+    def run_test(self, test_instance, test_name, param_set, legacy_name, base_flags, run_tags):
+        time0 = time.time()
+        portseed = random.randint(2**15, 2**16)
+        # Setup output capturing
+        original_stdout = sys.stdout
+        original_stderr = sys.stderr
+        test_stdout = StringIO()
+        test_stderr = StringIO()
+        sys.stdout = test_stdout
+        sys.stderr = test_stderr
+        exit_code = TestStatus.SKIPPED
+
+        param_set.extend(["--portseed={}".format(portseed),
+                          "--tmpdir=" + os.path.join(self.tmpdir, re.sub(
+                              ".py$", "", self.test_file) + "_" + test_name + "_".join(param_set) + "_" + str(portseed))])
         try:
-            # Setup output capturing
-            original_stdout = sys.stdout
-            original_stderr = sys.stderr
-            test_stdout = StringIO()
-            test_stderr = StringIO()
-            sys.stdout = test_stdout
-            sys.stderr = test_stderr
-
-            exit_code = None
-            for prop in dir(test_module):
-                obj = getattr(test_module, prop)
-                if inspect.isclass(obj) and issubclass(obj, BitcoinTestFramework) and obj is not BitcoinTestFramework and obj is not ComparisonTestFramework:
-                    test_instance = obj()
-
-                    # Give every test the fast tag by default unless otherwise specified
-                    tags = ["fast"]
-                    if hasattr(test_instance, 'test_tags'):
-                        tags = test_instance.test_tags
-
-                    if compare_tags(tags, run_tags):
-                        exit_code = obj().main(
-                            test_argv[1:] + self.flags + portseed_arg + tmpdir)
-                    else:
-                        exit_code = TestStatus.SKIPPED
+            # Pass argv explicitly: the test module was imported, not executed as a
+            # script, so argparse must not read the runner's sys.argv.
+            exit_code = test_instance.main(base_flags + param_set)
         except Exception as e:
             print(e)
+            exit_code = TestStatus.FAILED
         finally:
             sys.stdout = original_stdout
             sys.stderr = original_stderr
@@ -166,7 +190,7 @@
         else:
             status = "Failed"
 
-        return TestResult(name, status, int(time.time() - time0), stdout, stderr)
+        return TestResult(legacy_name, status, int(time.time() - time0), stdout, stderr)
 
 
 def on_ci():
@@ -281,9 +305,6 @@
         if exclude_test + ".py" in test_list:
             test_list.remove(exclude_test + ".py")
 
-    # Add test parameters and remove long running tests if needed
-    test_list = get_tests_to_run(test_list, TEST_PARAMS)
-
     if not test_list:
         print("No valid test scripts specified. "
               "Check that your test is in one "
               "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
@@ -349,7 +370,7 @@
         num_jobs, test_list, tests_dir, tmpdir, flags, tags)
     runtime = int(time.time() - time0)
 
-    max_len_name = len(max(test_list, key=len))
+    max_len_name = len(max([result.name for result in test_results], key=len))
     print_results(test_results, max_len_name, runtime)
     save_results_as_junit(test_results, junitouput, runtime)
@@ -377,9 +398,9 @@
     """
     handle_message handles a single message from handle_test_cases
     """
-    if isinstance(message, TestCase):
-        running_jobs.add(message.test_case)
-        print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
+    if isinstance(message, TestStarted):
+        running_jobs.add(message.test_name)
+        print("{}{}{} started".format(BOLD[1], message.test_name, BOLD[0]))
         return
     if isinstance(message, TestResult):
         test_result = message
@@ -432,7 +453,7 @@
             printed_status = True
 
 
-def handle_test_cases(job_queue, update_queue, tags):
+def handle_test_files(job_queue, update_queue, tags, base_flags):
     """
     job_runner represents a single thread that is part of a worker pool. It
     waits for a test, then executes that test. It also reports start
@@ -441,7 +462,6 @@
     # In case there is a graveyard of zombie bitcoinds, we can apply a
     # pseudorandom offset to hopefully jump over them.
     # (625 is PORT_RANGE/MAX_NODES)
-    portseed_offset = int(time.time() * 1000) % 625
 
     while True:
         test = job_queue.get()
@@ -449,9 +469,7 @@
             break
         # Signal that the test is starting to inform the poor waiting
         # programmer
-        update_queue.put(test)
-        result = test.run(portseed_offset, tags)
-        update_queue.put(result)
+        test.find_and_run_tests(update_queue, tags, base_flags)
         job_queue.task_done()
@@ -490,13 +508,13 @@
 
     # Start some worker threads
    for j in range(num_jobs):
-        t = ctx.Process(target=handle_test_cases,
-                        args=(job_queue, update_queue, tags,))
+        t = ctx.Process(target=handle_test_files,
+                        args=(job_queue, update_queue, tags, flags,))
         t.start()
 
-    # Push all our test cases into the job queue.
-    for i, t in enumerate(test_list):
-        job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags))
+    # Push all our test files into the job queue.
+    for t in test_list:
+        job_queue.put(TestFile(t, tests_dir, tmpdir))
 
     # Wait for all the jobs to be completed
     job_queue.join()
@@ -573,22 +591,19 @@
     return list(python_files - set(non_scripts))
 
 
-def get_tests_to_run(test_list, test_params):
+def get_test_parameters(test_file, test_params):
     """
-    Returns all combinations of tests with testing flags
+    Returns the parameter sets (lists of extra flags) that test_file should be run with
     """
     # Some tests must also be run with additional parameters. Add them to the list.
-    tests_with_params = []
-    for test_name in test_list:
-        # always execute a test without parameters
-        tests_with_params.append(test_name)
-        params = test_params.get(test_name)
-        if params is not None:
-            tests_with_params.extend(
-                [test_name + " " + " ".join(p) for p in params])
-
-    return tests_with_params
+    # always execute a test without parameters
+    param_sets = [[]]
+    additional_params = test_params.get(test_file)
+    if additional_params is not None:
+        param_sets.extend(additional_params)
+
+    return param_sets
 
 
 class RPCCoverage():