diff --git a/qa/pull-tester/rpc-tests.py b/qa/pull-tester/rpc-tests.py
index 6df67ca42..31b780e9e 100755
--- a/qa/pull-tester/rpc-tests.py
+++ b/qa/pull-tester/rpc-tests.py
@@ -1,373 +1,374 @@
 #!/usr/bin/env python3
 # Copyright (c) 2014-2016 The Bitcoin Core developers
 # Copyright (c) 2017 The Bitcoin developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.

 """
 Run Regression Test Suite

 This module calls down into individual test cases via subprocess. It will
 forward all unrecognized arguments onto the individual test scripts, other
 than:

     - `-extended`: run the "extended" test suite in addition to the basic one.
     - `-win`: signal that this is running in a Windows environment, and we
       should run the tests.
     - `--coverage`: this generates a basic coverage report for the RPC
       interface.

 For a description of arguments recognized by test scripts, see
 `qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.

 """

 import os
 import time
 import shutil
 import sys
 import subprocess
 import tempfile
 import re

 sys.path.append("qa/pull-tester/")
 from tests_config import *

 BOLD = ("","")
 if os.name == 'posix':
     # primitive formatting on supported
     # terminal via ANSI escape sequences:
     BOLD = ('\033[0m', '\033[1m')

 RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'

 #If imported values are not defined then set to zero (or disabled)
 if 'ENABLE_WALLET' not in vars():
     ENABLE_WALLET=0
 if 'ENABLE_BITCOIND' not in vars():
     ENABLE_BITCOIND=0
 if 'ENABLE_UTILS' not in vars():
     ENABLE_UTILS=0
 if 'ENABLE_ZMQ' not in vars():
     ENABLE_ZMQ=0

 ENABLE_COVERAGE=0

 #Create a set to store arguments and create the passon string
 opts = set()
 passon_args = []
 PASSON_REGEX = re.compile("^--")
 PARALLEL_REGEX = re.compile('^-parallel=')

 print_help = False
 run_parallel = 4

 for arg in sys.argv[1:]:
     if arg == "--help" or arg == "-h" or arg == "-?":
         print_help = True
         break
     if arg == '--coverage':
         ENABLE_COVERAGE = 1
     elif PASSON_REGEX.match(arg):
         passon_args.append(arg)
     elif PARALLEL_REGEX.match(arg):
         run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
     else:
         opts.add(arg)

 #Set env vars
 if "BITCOIND" not in os.environ:
     os.environ["BITCOIND"] = BUILDDIR + '/src/bitcoind' + EXEEXT

 if EXEEXT == ".exe" and "-win" not in opts:
     # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
     # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
     print("Win tests currently disabled by default. Use -win option to enable")
     sys.exit(0)

 if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
     print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
     sys.exit(0)

 # python3-zmq may not be installed. Handle this gracefully and with some helpful info
 if ENABLE_ZMQ:
     try:
         import zmq
     except ImportError:
         print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
               "to run zmq tests, see dependency info in /qa/README.md.")
         # ENABLE_ZMQ=0
         raise

 testScripts = [
     # longest test should go first, to favor running tests in parallel
     'wallet-hd.py',
     'walletbackup.py',
     # vv Tests less than 5m vv
     'p2p-fullblocktest.py',
     'fundrawtransaction.py',
     'p2p-compactblocks.py',
     # vv Tests less than 2m vv
     'wallet.py',
     'wallet-accounts.py',
     'wallet-dump.py',
     'listtransactions.py',
     # vv Tests less than 60s vv
     'sendheaders.py',
     'zapwallettxes.py',
     'importmulti.py',
     'mempool_limit.py',
     'merkle_blocks.py',
     'receivedby.py',
     'abandonconflict.py',
     'bip68-112-113-p2p.py',
     'rawtransactions.py',
     'reindex.py',
     # vv Tests less than 30s vv
     'mempool_resurrect_test.py',
     'txn_doublespend.py --mineblock',
     'txn_clone.py',
     'getchaintips.py',
     'rest.py',
     'mempool_spendcoinbase.py',
     'mempool_reorg.py',
     'httpbasics.py',
     'multi_rpc.py',
     'proxy_test.py',
     'signrawtransactions.py',
     'nodehandling.py',
     'decodescript.py',
     'blockchain.py',
     'disablewallet.py',
     'keypool.py',
     'p2p-mempool.py',
     'prioritise_transaction.py',
     'invalidblockrequest.py',
     'invalidtxrequest.py',
     'p2p-versionbits-warning.py',
     'preciousblock.py',
     'importprunedfunds.py',
     'signmessages.py',
     'nulldummy.py',
     'import-rescan.py',
     'rpcnamedargs.py',
     'listsinceblock.py',
     'p2p-leaktests.py',
     'abc-cmdline.py',
     'abc-p2p-activation.py',
     'abc-p2p-fullblocktest.py',
     'abc-rpc.py',
+    'mempool-accept-txn.py',
 ]
 if ENABLE_ZMQ:
     testScripts.append('zmq_test.py')

 testScriptsExt = [
     'pruning.py',
     # vv Tests less than 20m vv
     'smartfees.py',
     # vv Tests less than 5m vv
     'maxuploadtarget.py',
     'mempool_packages.py',
     # vv Tests less than 2m vv
     'bip68-sequence.py',
     'getblocktemplate_longpoll.py',
     'p2p-timeouts.py',
     # vv Tests less than 60s vv
     'bip9-softforks.py',
     'p2p-feefilter.py',
     'rpcbind_test.py',
     # vv Tests less than 30s vv
     'bip65-cltv.py',
     'bip65-cltv-p2p.py',
     'bipdersig-p2p.py',
     'bipdersig.py',
     'getblocktemplate_proposals.py',
     'txn_doublespend.py',
     'txn_clone.py --mineblock',
     'forknotify.py',
     'invalidateblock.py',
     'maxblocksinflight.py',
     'p2p-acceptblock.py',
 ]


 def runtests():
     test_list = []
     if '-extended' in opts:
         test_list = testScripts + testScriptsExt
     elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
         test_list = testScripts
     else:
         for t in testScripts + testScriptsExt:
             if t in opts or re.sub(".py$", "", t) in opts:
                 test_list.append(t)

     if print_help:
         # Only print help of the first script and exit
         subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
         sys.exit(0)

     coverage = None

     if ENABLE_COVERAGE:
         coverage = RPCCoverage()
         print("Initializing coverage directory at %s\n" % coverage.dir)
     flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
     flags.append("--cachedir=%s/qa/cache" % BUILDDIR)
     if coverage:
         flags.append(coverage.flag)

     if len(test_list) > 1 and run_parallel > 1:
         # Populate cache
         subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)

     #Run Tests
     max_len_name = len(max(test_list, key=len))
     time_sum = 0
     time0 = time.time()
     job_queue = RPCTestHandler(run_parallel, test_list, flags)
     results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
     all_passed = True
     for _ in range(len(test_list)):
         (name, stdout, stderr, passed, duration) = job_queue.get_next()
         all_passed = all_passed and passed
         time_sum += duration

         print('\n' + BOLD[1] + name + BOLD[0] + ":")
         print('' if passed else stdout + '\n', end='')
         print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
         results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
         print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
     results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
     print(results)
     print("\nRuntime: %s s" % (int(time.time() - time0)))

     if coverage:
         coverage.report_rpc_coverage()

         print("Cleaning up coverage data")
         coverage.cleanup()

     sys.exit(not all_passed)


 class RPCTestHandler:
     """
     Trigger the testscrips passed in via the list.
     """

     def __init__(self, num_tests_parallel, test_list=None, flags=None):
         assert(num_tests_parallel >= 1)
         self.num_jobs = num_tests_parallel
         self.test_list = test_list
         self.flags = flags
         self.num_running = 0
         # In case there is a graveyard of zombie bitcoinds, we can apply a
         # pseudorandom offset to hopefully jump over them.
         # (625 is PORT_RANGE/MAX_NODES)
         self.portseed_offset = int(time.time() * 1000) % 625
         self.jobs = []

     def get_next(self):
         while self.num_running < self.num_jobs and self.test_list:
             # Add tests
             self.num_running += 1
             t = self.test_list.pop(0)
             port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
             log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
             log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
             self.jobs.append((t,
                               time.time(),
                               subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
                                                universal_newlines=True,
                                                stdout=log_stdout,
                                                stderr=log_stderr),
                               log_stdout,
                               log_stderr))
         if not self.jobs:
             raise IndexError('pop from empty list')
         while True:
             # Return first proc that finishes
             time.sleep(.5)
             for j in self.jobs:
                 (name, time0, proc, log_out, log_err) = j
                 if proc.poll() is not None:
                     log_out.seek(0), log_err.seek(0)
                     [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
                     log_out.close(), log_err.close()
                     passed = stderr == "" and proc.returncode == 0
                     self.num_running -= 1
                     self.jobs.remove(j)
                     return name, stdout, stderr, passed, int(time.time() - time0)
             print('.', end='', flush=True)


 class RPCCoverage(object):
     """
     Coverage reporting utilities for pull-tester.

     Coverage calculation works by having each test script subprocess write
     coverage files into a particular directory. These files contain the RPC
     commands invoked during testing, as well as a complete listing of RPC
     commands per `bitcoin-cli help` (`rpc_interface.txt`).

     After all tests complete, the commands run are combined and diff'd against
     the complete list to calculate uncovered RPC commands.

     See also: qa/rpc-tests/test_framework/coverage.py

     """
     def __init__(self):
         self.dir = tempfile.mkdtemp(prefix="coverage")
         self.flag = '--coveragedir=%s' % self.dir

     def report_rpc_coverage(self):
         """
         Print out RPC commands that were unexercised by tests.

         """
         uncovered = self._get_uncovered_rpc_commands()

         if uncovered:
             print("Uncovered RPC commands:")
             print("".join((" - %s\n" % i) for i in sorted(uncovered)))
         else:
             print("All RPC commands covered.")

     def cleanup(self):
         return shutil.rmtree(self.dir)

     def _get_uncovered_rpc_commands(self):
         """
         Return a set of currently untested RPC commands.

         """
         # This is shared from `qa/rpc-tests/test-framework/coverage.py`
         REFERENCE_FILENAME = 'rpc_interface.txt'
         COVERAGE_FILE_PREFIX = 'coverage.'

         coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
         coverage_filenames = set()
         all_cmds = set()
         covered_cmds = set()

         if not os.path.isfile(coverage_ref_filename):
             raise RuntimeError("No coverage reference found")

         with open(coverage_ref_filename, 'r') as f:
             all_cmds.update([i.strip() for i in f.readlines()])

         for root, dirs, files in os.walk(self.dir):
             for filename in files:
                 if filename.startswith(COVERAGE_FILE_PREFIX):
                     coverage_filenames.add(os.path.join(root, filename))

         for filename in coverage_filenames:
             with open(filename, 'r') as f:
                 covered_cmds.update([i.strip() for i in f.readlines()])

         return all_cmds - covered_cmds


 if __name__ == '__main__':
     runtests()
diff --git a/qa/rpc-tests/mempool-accept-txn.py b/qa/rpc-tests/mempool-accept-txn.py
new file mode 100755
index 000000000..34502113b
--- /dev/null
+++ b/qa/rpc-tests/mempool-accept-txn.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python3
+# Copyright (c) 2015-2016 The Bitcoin Core developers
+# Copyright (c) 2017 The Bitcoin developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+"""
+This test checks acceptance of transactions by the mempool
+It is derived from the much more complex p2p-fullblocktest.
+"""
+
+from test_framework.test_framework import ComparisonTestFramework
+from test_framework.util import *
+from test_framework.comptool import TestManager, TestInstance
+from test_framework.blocktools import *
+import time
+from test_framework.key import CECKey
+from test_framework.script import *
+import struct
+from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE, MAX_STANDARD_TX_SIGOPS
+
+# Error for too many sigops in one TX
+TXNS_TOO_MANY_SIGOPS_ERROR = b'bad-txns-too-many-sigops'
+RPC_TXNS_TOO_MANY_SIGOPS_ERROR = "64: " + TXNS_TOO_MANY_SIGOPS_ERROR.decode("utf-8")
+
+class PreviousSpendableOutput(object):
+    def __init__(self, tx = CTransaction(), n = -1):
+        self.tx = tx
+        self.n = n  # the output we're spending
+
+class FullBlockTest(ComparisonTestFramework):
+
+    # Can either run this test as 1 node with expected answers, or two and compare them.
+    # Change the "outcome" variable from each TestInstance object to only do the comparison.
+    def __init__(self):
+        super().__init__()
+        self.num_nodes = 1
+        self.block_heights = {}
+        self.coinbase_key = CECKey()
+        self.coinbase_key.set_secretbytes(b"horsebattery")
+        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
+        self.tip = None
+        self.blocks = {}
+
+    def setup_network(self):
+        self.extra_args = [['-norelaypriority']]
+        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
+                                 self.extra_args,
+                                 binary=[self.options.testbinary])
+
+    def add_options(self, parser):
+        super().add_options(parser)
+        parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
+
+    def run_test(self):
+        self.test = TestManager(self, self.options.tmpdir)
+        self.test.add_all_connections(self.nodes)
+        # Start up network handling in another thread
+        NetworkThread().start()
+        # Set the blocksize to legacy cap (1MB) as initial condition
+        self.nodes[0].setexcessiveblock(LEGACY_MAX_BLOCK_SIZE)
+        self.test.run()
+
+    def add_transactions_to_block(self, block, tx_list):
+        [ tx.rehash() for tx in tx_list ]
+        block.vtx.extend(tx_list)
+
+    # this is a little handier to use than the version in blocktools.py
+    def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+        tx = create_transaction(spend_tx, n, b"", value, script)
+        return tx
+
+    # sign a transaction, using the key we know about
+    # this signs input 0 in tx, which is assumed to be spending output n in spend_tx
+    def sign_tx(self, tx, spend_tx, n):
+        scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
+        if (scriptPubKey[0] == OP_TRUE):  # an anyone-can-spend
+            tx.vin[0].scriptSig = CScript()
+            return
+        (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
+        tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
+
+    def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
+        tx = self.create_tx(spend_tx, n, value, script)
+        self.sign_tx(tx, spend_tx, n)
+        tx.rehash()
+        return tx
+
+    def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
+        if self.tip == None:
+            base_block_hash = self.genesis_hash
+            block_time = int(time.time())+1
+        else:
+            base_block_hash = self.tip.sha256
+            block_time = self.tip.nTime + 1
+        # First create the coinbase
+        height = self.block_heights[base_block_hash] + 1
+        coinbase = create_coinbase(height, self.coinbase_pubkey)
+        coinbase.vout[0].nValue += additional_coinbase_value
+        coinbase.rehash()
+        if spend == None:
+            block = create_block(base_block_hash, coinbase, block_time)
+        else:
+            coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1  # all but one satoshi to fees
+            coinbase.rehash()
+            block = create_block(base_block_hash, coinbase, block_time)
+            tx = create_transaction(spend.tx, spend.n, b"", 1, script)  # spend 1 satoshi
+            self.sign_tx(tx, spend.tx, spend.n)
+            self.add_transactions_to_block(block, [tx])
+            block.hashMerkleRoot = block.calc_merkle_root()
+        if solve:
+            block.solve()
+        self.tip = block
+        self.block_heights[block.sha256] = height
+        assert number not in self.blocks
+        self.blocks[number] = block
+        return block
+
+    def get_tests(self):
+        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
+        self.block_heights[self.genesis_hash] = 0
+        spendable_outputs = []
+
+        # save the current tip so it can be spent by a later block
+        def save_spendable_output():
+            spendable_outputs.append(self.tip)
+
+        # get an output that we previously marked as spendable
+        def get_spendable_output():
+            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
+
+        # returns a test case that asserts that the current tip was accepted
+        def accepted():
+            return TestInstance([[self.tip, True]])
+
+        # returns a test case that asserts that the current tip was rejected
+        def rejected(reject = None):
+            if reject is None:
+                return TestInstance([[self.tip, False]])
+            else:
+                return TestInstance([[self.tip, reject]])
+
+        # move the tip back to a previous block
+        def tip(number):
+            self.tip = self.blocks[number]
+
+        # adds transactions to the block and updates state
+        def update_block(block_number, new_transactions):
+            block = self.blocks[block_number]
+            self.add_transactions_to_block(block, new_transactions)
+            old_sha256 = block.sha256
+            block.hashMerkleRoot = block.calc_merkle_root()
+            block.solve()
+            # Update the internal state just like in next_block
+            self.tip = block
+            if block.sha256 != old_sha256:
+                self.block_heights[block.sha256] = self.block_heights[old_sha256]
+                del self.block_heights[old_sha256]
+            self.blocks[block_number] = block
+            return block
+
+        # shorthand for functions
+        block = self.next_block
+        create_tx = self.create_tx
+
+        # shorthand for variables
+        node = self.nodes[0]
+
+        # Create a new block
+        block(0)
+        save_spendable_output()
+        yield accepted()
+
+        # Now we need that block to mature so we can spend the coinbase.
+        test = TestInstance(sync_every_block=False)
+        for i in range(99):
+            block(5000 + i)
+            test.blocks_and_transactions.append([self.tip, True])
+            save_spendable_output()
+        yield test
+
+        # Collect spendable outputs now to avoid cluttering the code later on
+        out = []
+        for i in range(33):
+            out.append(get_spendable_output())
+
+        # P2SH
+        # Build the redeem script, hash it, use hash to create the p2sh script
+        redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
+        redeem_script_hash = hash160(redeem_script)
+        p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
+
+        # Creates a new transaction using a p2sh transaction as input
+        def spend_p2sh_tx(p2sh_tx_to_spend, output_script=CScript([OP_TRUE])):
+            # Create the transaction
+            spent_p2sh_tx = CTransaction()
+            spent_p2sh_tx.vin.append(CTxIn(COutPoint(p2sh_tx_to_spend.sha256, 0), b''))
+            spent_p2sh_tx.vout.append(CTxOut(1, output_script))
+            # Sign the transaction using the redeem script
+            (sighash, err) = SignatureHash(redeem_script, spent_p2sh_tx, 0, SIGHASH_ALL)
+            sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
+            spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script])
+            spent_p2sh_tx.rehash()
+            return spent_p2sh_tx
+
+        # P2SH tests
+        # Create a p2sh transaction
+        p2sh_tx = self.create_and_sign_transaction(out[0].tx, out[0].n, 1, p2sh_script)
+
+        # Add the transaction to the block
+        block(1)
+        update_block(1, [p2sh_tx])
+        yield accepted()
+
+        # Sigops p2sh limit for the mempool test
+        p2sh_sigops_limit_mempool = MAX_STANDARD_TX_SIGOPS - redeem_script.GetSigOpCount(True)
+        # Too many sigops in one p2sh script
+        too_many_p2sh_sigops_mempool = CScript([OP_CHECKSIG] * (p2sh_sigops_limit_mempool + 1))
+
+        # A transaction with this output script can't get into the mempool
+        try:
+            node.sendrawtransaction(ToHex(spend_p2sh_tx(p2sh_tx, too_many_p2sh_sigops_mempool)))
+        except JSONRPCException as exp:
+            assert_equal(exp.error["message"], RPC_TXNS_TOO_MANY_SIGOPS_ERROR)
+        else:
+            assert(False)
+
+        # The transaction is rejected, so the mempool should still be empty
+        assert_equal(set(node.getrawmempool()), set())
+
+        # Max sigops in one p2sh txn
+        max_p2sh_sigops_mempool = CScript([OP_CHECKSIG] * (p2sh_sigops_limit_mempool))
+
+        # A transaction with this output script can get into the mempool
+        max_p2sh_sigops_txn = spend_p2sh_tx(p2sh_tx, max_p2sh_sigops_mempool)
+        max_p2sh_sigops_txn_id = node.sendrawtransaction(ToHex(max_p2sh_sigops_txn))
+        assert_equal(set(node.getrawmempool()), {max_p2sh_sigops_txn_id})
+
+        # Mine the transaction
+        block(2, spend=out[1])
+        update_block(2, [max_p2sh_sigops_txn])
+        yield accepted()
+
+        # The transaction has been mined, it's not in the mempool anymore
+        assert_equal(set(node.getrawmempool()), set())
+
+if __name__ == '__main__':
+    FullBlockTest().main()
diff --git a/qa/rpc-tests/test_framework/cdefs.py b/qa/rpc-tests/test_framework/cdefs.py
index a6de305fc..e3091dcb7 100644
--- a/qa/rpc-tests/test_framework/cdefs.py
+++ b/qa/rpc-tests/test_framework/cdefs.py
@@ -1,56 +1,60 @@
 #!/usr/bin/env python3
 # Copyright (c) 2017 The Bitcoin developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """
 Imports some application default values from source files outside the test
 framework, and defines equivalents of consensus parameters for the test
 framework.
 """

 import re

 # Slurp in consensus.h contents
 # FIXME: this breaks when tests are not called from top level
 #_consensus_h_fh = open('src/consensus/consensus.h', 'rt')
 #_consensus_h_contents = _consensus_h_fh.read()
 #_consensus_h_fh.close()

 # This constant is currently needed to evaluate some that are formulas
 ONE_MEGABYTE = 1000000

 # Extract relevant default values parameters
 # FIXME: re-enable evaluation of consensus.h once that can be read from
 # subfolder of top level

 # The maximum allowed block size before the fork
 LEGACY_MAX_BLOCK_SIZE = ONE_MEGABYTE

 # Default setting for maximum allowed size for a block, in bytes
 DEFAULT_MAX_BLOCK_SIZE = 8 * ONE_MEGABYTE

 # The following consensus parameters should not be automatically imported.
 # They *should* cause test failures if application code is changed in ways
 # that violate current consensus.

 # The maximum allowed number of signature check operations per MB in a block
 # (network rule)
 MAX_BLOCK_SIGOPS_PER_MB = 20000

 # The maximum allowed number of signature check operations per transaction
 # (network rule)
 MAX_TX_SIGOPS_COUNT = 20000

+# The maximum number of sigops we're willing to relay/mine in a single tx
+# (policy.h constant)
+MAX_STANDARD_TX_SIGOPS = MAX_TX_SIGOPS_COUNT // 5
+
 # Coinbase transaction outputs can only be spent after this number of new
 # blocks (network rule)
 COINBASE_MATURITY = 100

 # Anti replay OP_RETURN commitment.
 ANTI_REPLAY_COMMITMENT = b"Placeholder for the anti replay commitment"

 if __name__ == "__main__":
     # Output values if run standalone to verify
     print("DEFAULT_MAX_BLOCK_SIZE = %d (bytes)" % DEFAULT_MAX_BLOCK_SIZE)
     print("MAX_BLOCK_SIGOPS_PER_MB = %d (sigops)" % MAX_BLOCK_SIGOPS_PER_MB)
     print("MAX_TX_SIGOPS_COUNT = %d (sigops)" % MAX_TX_SIGOPS_COUNT)
     print("COINBASE_MATURITY = %d (blocks)" % COINBASE_MATURITY)
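
Note on the sigop arithmetic the new test relies on: the sketch below is illustrative and not part of the patch. It takes the constants from the cdefs.py hunk above and assumes accurate sigop counting for the P2SH redeem script, i.e. each OP_CHECKSIG/OP_CHECKSIGVERIFY counts as one.

# With MAX_TX_SIGOPS_COUNT = 20000, the per-transaction standardness budget is
# MAX_STANDARD_TX_SIGOPS = 20000 // 5 = 4000 sigops. The redeem script
# [pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG] contributes 6 of
# them when the P2SH input is redeemed, so the spending transaction's output
# script may carry at most 3994 OP_CHECKSIGs; one more puts it over the limit.
MAX_TX_SIGOPS_COUNT = 20000
MAX_STANDARD_TX_SIGOPS = MAX_TX_SIGOPS_COUNT // 5       # 4000
redeem_script_sigops = 5 + 1                            # five CHECKSIGVERIFY, one CHECKSIG
p2sh_sigops_limit_mempool = MAX_STANDARD_TX_SIGOPS - redeem_script_sigops
assert p2sh_sigops_limit_mempool == 3994                                               # accepted txn
assert (p2sh_sigops_limit_mempool + 1) + redeem_script_sigops > MAX_STANDARD_TX_SIGOPS  # rejected txn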