Changeset View
Changeset View
Standalone View
Standalone View
test/functional/test_runner.py
Show First 20 Lines • Show All 97 Lines • ▼ Show 20 Lines | class TestCase(): | ||||
def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None): | def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None): | ||||
self.tests_dir = tests_dir | self.tests_dir = tests_dir | ||||
self.tmpdir = tmpdir | self.tmpdir = tmpdir | ||||
self.test_case = test_case | self.test_case = test_case | ||||
self.test_num = test_num | self.test_num = test_num | ||||
self.flags = flags | self.flags = flags | ||||
def run(self, portseed_offset): | def run(self, portseed_offset, run_extended): | ||||
t = self.test_case | t = self.test_case | ||||
portseed = self.test_num * 10 + portseed_offset | portseed = self.test_num * 10 + portseed_offset | ||||
portseed_arg = ["--portseed={}".format(portseed)] | portseed_arg = ["--portseed={}".format(portseed)] | ||||
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) | log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) | ||||
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) | log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) | ||||
test_argv = t.split() | test_argv = t.split() | ||||
tmpdir = [os.path.join("--tmpdir=%s", "%s_%s") % | tmpdir = [os.path.join("--tmpdir=%s", "%s_%s") % | ||||
(self.tmpdir, re.sub(".py$", "", t), portseed)] | (self.tmpdir, re.sub(".py$", "", t), portseed)] | ||||
Show All 17 Lines | def run(self, portseed_offset, run_extended): | ||||
sys.stderr = test_stderr | sys.stderr = test_stderr | ||||
exit_code = None | exit_code = None | ||||
try: | try: | ||||
print("Entry") | print("Entry") | ||||
for prop in dir(test_module): | for prop in dir(test_module): | ||||
obj = getattr(test_module, prop) | obj = getattr(test_module, prop) | ||||
if inspect.isclass(obj) and issubclass(obj, BitcoinTestFramework) and obj is not BitcoinTestFramework and obj is not ComparisonTestFramework: | if inspect.isclass(obj) and issubclass(obj, BitcoinTestFramework) and obj is not BitcoinTestFramework and obj is not ComparisonTestFramework: | ||||
test_instance = obj() | |||||
is_extended = hasattr(test_instance, 'extended') | |||||
if (is_extended and run_extended) or not is_extended: | |||||
exit_code = obj().main( | exit_code = obj().main( | ||||
test_argv[1:] + self.flags + portseed_arg + tmpdir) | test_argv[1:] + self.flags + portseed_arg + tmpdir) | ||||
else: | |||||
exit_code = TestStatus.SKIPPED | |||||
except Exception as e: | except Exception as e: | ||||
print(e) | print(e) | ||||
finally: | finally: | ||||
sys.stdout = original_stdout | sys.stdout = original_stdout | ||||
sys.stderr = original_stderr | sys.stderr = original_stderr | ||||
[stdout, stderr] = [test_stdout.getvalue(), test_stderr.getvalue()] | [stdout, stderr] = [test_stdout.getvalue(), test_stderr.getvalue()] | ||||
test_stdout.close(), test_stderr.close() | test_stdout.close(), test_stderr.close() | ||||
▲ Show 20 Lines • Show All 146 Lines • ▼ Show 20 Lines | if args.help: | ||||
[os.path.join(tests_dir, test_list[0]), '-h']) | [os.path.join(tests_dir, test_list[0]), '-h']) | ||||
sys.exit(0) | sys.exit(0) | ||||
if not args.keepcache: | if not args.keepcache: | ||||
shutil.rmtree(os.path.join(build_dir, "test", | shutil.rmtree(os.path.join(build_dir, "test", | ||||
"cache"), ignore_errors=True) | "cache"), ignore_errors=True) | ||||
run_tests(test_list, build_dir, tests_dir, args.junitouput, | run_tests(test_list, build_dir, tests_dir, args.junitouput, | ||||
config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, build_timings) | config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, build_timings, args.extended) | ||||
def run_tests(test_list, build_dir, tests_dir, junitouput, exeext, tmpdir, num_jobs, enable_coverage=False, args=[], build_timings=None): | def run_tests(test_list, build_dir, tests_dir, junitouput, exeext, tmpdir, num_jobs, enable_coverage=False, args=[], build_timings=None, run_extended=None): | ||||
# Warn if bitcoind is already running (unix only) | # Warn if bitcoind is already running (unix only) | ||||
try: | try: | ||||
pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) | ||||
if pidofOutput is not None and pidofOutput != b'': | if pidofOutput is not None and pidofOutput != b'': | ||||
print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % ( | print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % ( | ||||
BOLD[1], BOLD[0])) | BOLD[1], BOLD[0])) | ||||
except (OSError, subprocess.SubprocessError): | except (OSError, subprocess.SubprocessError): | ||||
pass | pass | ||||
Show All 24 Lines | def run_tests(test_list, build_dir, tests_dir, junitouput, exeext, tmpdir, num_jobs, enable_coverage=False, args=[], build_timings=None, run_extended=None): | ||||
if len(test_list) > 1 and num_jobs > 1: | if len(test_list) > 1 and num_jobs > 1: | ||||
# Populate cache | # Populate cache | ||||
subprocess.check_output( | subprocess.check_output( | ||||
[os.path.join(tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir=%s", "cache") % tmpdir]) | [os.path.join(tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir=%s", "cache") % tmpdir]) | ||||
# Run Tests | # Run Tests | ||||
time0 = time.time() | time0 = time.time() | ||||
test_results = execute_test_processes( | test_results = execute_test_processes( | ||||
num_jobs, test_list, tests_dir, tmpdir, flags) | num_jobs, test_list, tests_dir, tmpdir, flags, run_extended) | ||||
runtime = int(time.time() - time0) | runtime = int(time.time() - time0) | ||||
max_len_name = len(max(test_list, key=len)) | max_len_name = len(max(test_list, key=len)) | ||||
print_results(test_results, max_len_name, runtime) | print_results(test_results, max_len_name, runtime) | ||||
save_results_as_junit(test_results, junitouput, runtime) | save_results_as_junit(test_results, junitouput, runtime) | ||||
if (build_timings is not None): | if (build_timings is not None): | ||||
build_timings.save_timings(test_results) | build_timings.save_timings(test_results) | ||||
▲ Show 20 Lines • Show All 72 Lines • ▼ Show 20 Lines | while True: | ||||
except Empty as e: | except Empty as e: | ||||
if not on_ci(): | if not on_ci(): | ||||
print("Running jobs: {}".format( | print("Running jobs: {}".format( | ||||
", ".join(running_jobs)), end="\r") | ", ".join(running_jobs)), end="\r") | ||||
sys.stdout.flush() | sys.stdout.flush() | ||||
printed_status = True | printed_status = True | ||||
def handle_test_cases(job_queue, update_queue, run_extended):
    """
    job_runner represents a single thread that is part of a worker pool.
    It waits for a test, then executes that test. It also reports start
    and result messages to handle_update_messages.
    """
    # In case there is a graveyard of zombie bitcoinds, we can apply a
    # pseudorandom offset to hopefully jump over them.
    # (625 is PORT_RANGE/MAX_NODES)
    portseed_offset = int(time.time() * 1000) % 625
    # iter() with a None sentinel keeps pulling jobs until the shutdown
    # marker (None) is dequeued, at which point the worker exits.
    for test in iter(job_queue.get, None):
        # Announce the start so the poor waiting programmer sees progress.
        update_queue.put(test)
        update_queue.put(test.run(portseed_offset, run_extended))
        job_queue.task_done()
def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags): | def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags, run_extended): | ||||
ctx = mp.get_context('spawn') | ctx = mp.get_context('spawn') | ||||
update_queue = ctx.JoinableQueue() | update_queue = ctx.JoinableQueue() | ||||
job_queue = ctx.JoinableQueue() | job_queue = ctx.JoinableQueue() | ||||
results_queue = ctx.Queue() | results_queue = ctx.Queue() | ||||
## | ## | ||||
# Setup our threads, and start sending tasks | # Setup our threads, and start sending tasks | ||||
## | ## | ||||
mp.Pipe() | mp.Pipe() | ||||
# Start our result collection thread. | # Start our result collection thread. | ||||
t = ctx.Process(target=handle_update_messages, | t = ctx.Process(target=handle_update_messages, | ||||
args=(update_queue, results_queue,)) | args=(update_queue, results_queue,)) | ||||
t.start() | t.start() | ||||
# Start some worker threads | # Start some worker threads | ||||
for j in range(num_jobs): | for j in range(num_jobs): | ||||
t = ctx.Process(target=handle_test_cases, | t = ctx.Process(target=handle_test_cases, | ||||
args=(job_queue, update_queue,)) | args=(job_queue, update_queue, run_extended,)) | ||||
t.start() | t.start() | ||||
# Push all our test cases into the job queue. | # Push all our test cases into the job queue. | ||||
for i, t in enumerate(test_list): | for i, t in enumerate(test_list): | ||||
job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags)) | job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags)) | ||||
# Wait for all the jobs to be completed | # Wait for all the jobs to be completed | ||||
job_queue.join() | job_queue.join() | ||||
▲ Show 20 Lines • Show All 260 Lines • Show Last 20 Lines |