diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py index 71973e387..27971b61f 100755 --- a/test/functional/combine_logs.py +++ b/test/functional/combine_logs.py @@ -1,140 +1,136 @@ #!/usr/bin/env python3 """Combine logs from multiple bitcoin nodes as well as the test_framework log. This streams the combined log output to stdout. Use combine_logs.py > outputfile to write to an outputfile.""" import argparse from collections import defaultdict, namedtuple import glob import heapq import itertools import os import re import sys # Matches on the date format at the start of the log event TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z") LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event']) def main(): """Main function. Parses args, reads the log files and renders them as text or html.""" parser = argparse.ArgumentParser( usage='%(prog)s [options] ', description=__doc__) parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)') parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2') args, unknown_args = parser.parse_known_args() - if args.color and os.name != 'posix': - print("Color output requires posix terminal colors.") - sys.exit(1) - if args.html and args.color: print("Only one out of --color or --html should be specified") sys.exit(1) # There should only be one unknown argument - the path of the temporary # test directory if len(unknown_args) != 1: print("Unexpected arguments" + str(unknown_args)) sys.exit(1) log_events = read_logs(unknown_args[0]) print_logs(log_events, color=args.color, html=args.html) def read_logs(tmp_dir): """Reads log files. Delegates to generator function get_log_events() to provide individual log events for each of the input log files.""" # Find out what the folder is called that holds the debug.log file chain = glob.glob("{}/node0/*/debug.log".format(tmp_dir)) if chain: # pick the first one if more than one chain was found (should never # happen) chain = chain[0] # extract the chain name chain = re.search(r'node0/(.+?)/debug\.log$', chain).group(1) else: # fallback to regtest (should only happen when none exists) chain = 'regtest' files = [("test", "{}/test_framework.log".format(tmp_dir))] for i in itertools.count(): logfile = "{}/node{}/{}/debug.log".format(tmp_dir, i, chain) if not os.path.isfile(logfile): break files.append(("node{}".format(i), logfile)) return heapq.merge(*[get_log_events(source, f) for source, f in files]) def get_log_events(source, logfile): """Generator function that returns individual log events. Log events may be split over multiple lines. We use the timestamp regex match as the marker for a new log event.""" try: with open(logfile, 'r', encoding='utf-8') as infile: event = '' timestamp = '' for line in infile: # skip blank lines if line == '\n': continue # if this line has a timestamp, it's the start of a new log # event. time_match = TIMESTAMP_PATTERN.match(line) if time_match: if event: yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip()) event = line timestamp = time_match.group() # if it doesn't have a timestamp, it's a continuation line of # the previous log. 
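
The combining logic above hinges on two things: the timestamp regex decides where one log event ends and the next begins, and heapq.merge interleaves the per-source generators chronologically because LogEvent tuples compare field-by-field, timestamp first. A minimal, self-contained sketch of that behaviour (separate from the patch itself, with invented sample log lines):

    import heapq
    import re
    from collections import namedtuple

    TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}Z")
    LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])

    def events_from_lines(source, lines):
        """Group raw lines into LogEvents; a line without a leading timestamp
        is a continuation of the previous event, mirroring get_log_events()."""
        event, timestamp = '', ''
        for line in lines:
            match = TIMESTAMP_PATTERN.match(line)
            if match:
                if event:
                    yield LogEvent(timestamp, source, event.rstrip())
                event, timestamp = line, match.group()
            else:
                event += "\n" + line
        if event:
            yield LogEvent(timestamp, source, event.rstrip())

    # Invented sample data: each source's events are already time-ordered.
    node0 = ["2019-01-01T00:00:01.000000Z node0 event", "  continuation line"]
    test_log = ["2019-01-01T00:00:00.500000Z framework event"]

    # heapq.merge only needs each input iterator to be sorted; the timestamp
    # (first tuple field) then drives the global ordering of the output.
    for ev in heapq.merge(events_from_lines("node0", node0),
                          events_from_lines("test", test_log)):
        print(ev.timestamp, ev.source)
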
else: event += "\n" + line # Flush the final event yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip()) except FileNotFoundError: print("File {} could not be opened. Continuing without it.".format( logfile), file=sys.stderr) def print_logs(log_events, color=False, html=False): """Renders the iterator of log events into text or html.""" if not html: colors = defaultdict(lambda: '') if color: colors["test"] = "\033[0;36m" # CYAN colors["node0"] = "\033[0;34m" # BLUE colors["node1"] = "\033[0;32m" # GREEN colors["node2"] = "\033[0;31m" # RED colors["node3"] = "\033[0;33m" # YELLOW colors["reset"] = "\033[0m" # Reset font color for event in log_events: print("{0} {1: <5} {2} {3}".format( colors[event.source.rstrip()], event.source, event.event, colors["reset"])) else: try: import jinja2 except ImportError: print("jinja2 not found. Try `pip install jinja2`") sys.exit(1) print(jinja2.Environment(loader=jinja2.FileSystemLoader('./')) .get_template('combined_log_template.html') .render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events])) if __name__ == '__main__': main() diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index d0da97d36..52abfbf24 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -1,1164 +1,1164 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test block processing.""" import copy import struct import time from test_framework.blocktools import ( create_block, create_coinbase, create_tx_with_script, make_conform_to_ctor, ) from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE from test_framework.key import CECKey from test_framework.messages import ( CBlock, COIN, COutPoint, CTransaction, CTxIn, CTxOut, uint256_from_compact, uint256_from_str, ) from test_framework.mininode import P2PDataStore from test_framework.script import ( CScript, OP_ELSE, OP_ENDIF, OP_FALSE, OP_IF, OP_INVALIDOPCODE, OP_RETURN, OP_TRUE, SIGHASH_ALL, SIGHASH_FORKID, SignatureHashForkId, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal # Use this class for tests that require behavior other than normal "mininode" behavior. # For now, it is used to serialize a bloated varint (b64). class CBrokenBlock(CBlock): def initialize(self, base_block): self.vtx = copy.deepcopy(base_block.vtx) self.hashMerkleRoot = self.calc_merkle_root() def serialize(self): r = b"" r += super(CBlock, self).serialize() r += struct.pack(" b1 (0) -> b2 (1) b1 = self.next_block(1, spend=out[0]) self.save_spendable_output() b2 = self.next_block(2, spend=out[1]) self.save_spendable_output() self.sync_blocks([b1, b2]) # Fork like this: # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) # # Nothing should happen at this point. We saw b2 first so it takes # priority. self.log.info("Don't reorg to a chain of the same length") self.move_tip(1) b3 = self.next_block(3, spend=out[1]) txout_b3 = b3.vtx[1] self.sync_blocks([b3], False) # Now we add another block to make the alternative chain longer. # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) -> b4 (2) self.log.info("Reorg to a longer chain") b4 = self.next_block(4, spend=out[2]) self.sync_blocks([b4]) # ... and back to the first chain. 
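
The ASCII fork diagrams that follow all exercise the same tip-selection behaviour: when two branches are equally good the node keeps the tip it saw first, and it reorganises only when a competing branch becomes strictly better. A toy sketch of that bookkeeping, assuming the fixed-difficulty blocks this test mines so that chain length stands in for accumulated work (the class and names are illustrative, not framework code):

    class ToyChain:
        def __init__(self):
            self.blocks = {"genesis": ("", 0)}   # hash -> (parent, height)
            self.tip = "genesis"

        def add_block(self, block_hash, parent):
            height = self.blocks[parent][1] + 1
            self.blocks[block_hash] = (parent, height)
            # Strictly greater is required: a tie keeps the first-seen tip.
            if height > self.blocks[self.tip][1]:
                self.tip = block_hash

    chain = ToyChain()
    chain.add_block("b1", "genesis")
    chain.add_block("b2", "b1")
    chain.add_block("b3", "b1")   # same height as b2: no reorg, b2 stays the tip
    chain.add_block("b4", "b3")   # longer fork: reorg onto the b3/b4 branch
    assert chain.tip == "b4"
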
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b3 (1) -> b4 (2) self.move_tip(2) b5 = self.next_block(5, spend=out[2]) self.save_spendable_output() self.sync_blocks([b5], False) self.log.info("Reorg back to the original chain") b6 = self.next_block(6, spend=out[3]) self.sync_blocks([b6], True) # Try to create a fork that double-spends # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a chain with a double spend, even if it is longer") self.move_tip(5) b7 = self.next_block(7, spend=out[2]) self.sync_blocks([b7], False) b8 = self.next_block(8, spend=out[4]) self.sync_blocks([b8], False, reconnect=True) # Try to create a block that has too much fee # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block where the miner creates too much coinbase reward") self.move_tip(6) b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1) self.sync_blocks([b9], success=False, reject_reason='bad-cb-amount', reconnect=True) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a chain where the miner creates too much coinbase reward, even if the chain is longer") self.move_tip(5) b10 = self.next_block(10, spend=out[3]) self.sync_blocks([b10], False) b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1) self.sync_blocks([b11], success=False, reject_reason='bad-cb-amount', reconnect=True) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)") self.move_tip(5) b12 = self.next_block(12, spend=out[3]) self.save_spendable_output() b13 = self.next_block(13, spend=out[4]) self.save_spendable_output() b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1) self.sync_blocks([b12, b13, b14], success=False, reject_reason='bad-cb-amount', reconnect=True) # New tip should be b13. 
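
b9, b11 and b14 each pad the coinbase with additional_coinbase_value=1 and are rejected with bad-cb-amount: a coinbase may claim at most the block subsidy plus the fees of the block's transactions. A hedged sketch of that rule (the 150-block halving interval is the regtest default; the helper name is illustrative):

    COIN = 100000000

    def coinbase_amount_ok(coinbase_value, height, fees, halving_interval=150):
        """Sketch of the bad-cb-amount check: the coinbase output value must
        not exceed the block subsidy plus the fees collected in the block."""
        subsidy = (50 * COIN) >> (height // halving_interval)
        return coinbase_value <= subsidy + fees

    # Claiming a single extra satoshi, as b9/b11/b14 do, must be rejected.
    assert coinbase_amount_ok(50 * COIN, height=10, fees=0)
    assert not coinbase_amount_ok(50 * COIN + 1, height=10, fees=0)
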
assert_equal(node.getbestblockhash(), b13.hash) self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py self.move_tip(13) b15 = self.next_block(15) self.save_spendable_output() self.sync_blocks([b15], True) # Attempt to spend a transaction created on a different fork # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) # \-> b3 (1) -> b4 (2) self.log.info("Reject a block with a spend from a re-org'ed out tx") self.move_tip(15) b17 = self.next_block(17, spend=txout_b3) self.sync_blocks([b17], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block with a spend from a re-org'ed out tx (on a forked chain)") self.move_tip(13) b18 = self.next_block(18, spend=txout_b3) self.sync_blocks([b18], False) b19 = self.next_block(19, spend=out[6]) self.sync_blocks([b19], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) # \-> b3 (1) -> b4 (2) self.log.info("Reject a block spending an immature coinbase.") self.move_tip(15) b20 = self.next_block(20, spend=out[7]) self.sync_blocks([b20], success=False, reject_reason='bad-txns-premature-spend-of-coinbase') # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block spending an immature coinbase (on a forked chain)") self.move_tip(13) b21 = self.next_block(21, spend=out[6]) self.sync_blocks([b21], False) b22 = self.next_block(22, spend=out[5]) self.sync_blocks([b22], success=False, reject_reason='bad-txns-premature-spend-of-coinbase') # Create a block on either side of LEGACY_MAX_BLOCK_SIZE and make sure its accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) self.log.info("Accept a block of size LEGACY_MAX_BLOCK_SIZE") self.move_tip(15) b23 = self.next_block(23, spend=out[6]) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b23.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) b23 = self.update_block(23, [tx]) # Make sure the math above worked out to produce a max-sized block assert_equal(len(b23.serialize()), LEGACY_MAX_BLOCK_SIZE) self.sync_blocks([b23], True) self.save_spendable_output() # Create blocks with a coinbase input script size out of range # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... (7) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block with coinbase input script size out of range") self.move_tip(15) b26 = self.next_block(26, spend=out[6]) b26.vtx[0].vin[0].scriptSig = b'\x00' b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. 
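
b26 (a one-byte coinbase scriptSig) and b28 below (101 bytes) bracket the coinbase script-length bounds, while b30 sits exactly on the limit; all three target the bad-cb-length rule, which requires the coinbase input script to be between 2 and 100 bytes inclusive. A one-function sketch of that check (illustrative, not framework code):

    def coinbase_scriptsig_length_ok(script_sig):
        # Sketch of the bad-cb-length rule: the coinbase input script must be
        # between 2 and 100 bytes, inclusive.
        return 2 <= len(script_sig) <= 100

    assert not coinbase_scriptsig_length_ok(b'\x00')        # b26: 1 byte, too short
    assert not coinbase_scriptsig_length_ok(b'\x00' * 101)  # b28: too long
    assert coinbase_scriptsig_length_ok(b'\x00' * 100)      # b30: exactly at the limit
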
b26 = self.update_block(26, []) self.sync_blocks([b26], success=False, reject_reason='bad-cb-length', reconnect=True) # Extend the b26 chain to make sure bitcoind isn't accepting b26 b27 = self.next_block(27, spend=out[7]) self.sync_blocks([b27], False) # Now try a too-large-coinbase script self.move_tip(15) b28 = self.next_block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() b28 = self.update_block(28, []) self.sync_blocks([b28], success=False, reject_reason='bad-cb-length', reconnect=True) # Extend the b28 chain to make sure bitcoind isn't accepting b28 b29 = self.next_block(29, spend=out[7]) self.sync_blocks([b29], False) # b30 has a max-sized coinbase scriptSig. self.move_tip(23) b30 = self.next_block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() b30 = self.update_block(30, []) self.sync_blocks([b30], True) self.save_spendable_output() self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py b31 = self.next_block(31) self.save_spendable_output() b33 = self.next_block(33) self.save_spendable_output() b35 = self.next_block(35) self.save_spendable_output() self.sync_blocks([b31, b33, b35], True) # Check spending of a transaction in a block which failed to connect # # b6 (3) # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b37 (11) # \-> b38 (11/37) # # save 37's spendable output, but then double-spend out11 to invalidate # the block self.log.info( "Reject a block spending transaction from a block which failed to connect") self.move_tip(35) b37 = self.next_block(37, spend=out[11]) txout_b37 = b37.vtx[1] tx = self.create_and_sign_transaction(out[11], 0) b37 = self.update_block(37, [tx]) self.sync_blocks([b37], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # attempt to spend b37's first non-coinbase tx, at which point b37 was # still considered valid self.move_tip(35) b38 = self.next_block(38, spend=txout_b37) self.sync_blocks([b38], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py self.move_tip(35) b39 = self.next_block(39) self.save_spendable_output() b41 = self.next_block(41) self.sync_blocks([b39, b41], True) # Fork off of b39 to create a constant base again # # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) # \-> b41 (12) # self.move_tip(39) b42 = self.next_block(42, spend=out[12]) self.save_spendable_output() b43 = self.next_block(43, spend=out[13]) self.save_spendable_output() self.sync_blocks([b42, b43], True) # Test a number of really invalid scenarios # # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) # \-> ??? (15) # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to # make sure this works. 
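
The hand-built headers that follow set nBits = 0x207fffff directly, and b47 and b50 later probe the proof-of-work check itself (an unsolved header, then a lowered nBits). For orientation, a sketch of how the compact nBits value expands into the 256-bit target and how the block hash is compared against it; the expansion mirrors uint256_from_compact from test_framework.messages, while the validity predicate is a simplification that ignores the sanity checks on nBits itself:

    def uint256_from_compact(nbits):
        # Compact encoding: top byte is the size, low three bytes the mantissa.
        nbytes = (nbits >> 24) & 0xff
        return (nbits & 0xffffff) << (8 * (nbytes - 3))

    def has_valid_pow(block_hash_as_int, nbits):
        # Simplified check: the hash, read as a 256-bit integer, must not
        # exceed the target. b47 deliberately grinds its nonce until the hash
        # lands above the target, so this predicate fails for it.
        return block_hash_as_int <= uint256_from_compact(nbits)

    target = uint256_from_compact(0x207fffff)  # regtest minimum-difficulty target
    assert has_valid_pow(target, 0x207fffff)
    assert not has_valid_pow(target + 1, 0x207fffff)
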
self.log.info("Build block 44 manually") height = self.block_heights[self.tip.sha256] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() b44.nTime = self.tip.nTime + 1 b44.hashPrevBlock = self.tip.sha256 b44.nBits = 0x207fffff b44.vtx.append(coinbase) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() self.tip = b44 self.block_heights[b44.sha256] = height self.blocks[44] = b44 self.sync_blocks([b44], True) self.log.info("Reject a block with a non-coinbase as the first tx") non_coinbase = self.create_tx(out[15], 0, 1) b45 = CBlock() b45.nTime = self.tip.nTime + 1 b45.hashPrevBlock = self.tip.sha256 b45.nBits = 0x207fffff b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() self.block_heights[b45.sha256] = self.block_heights[ self.tip.sha256] + 1 self.tip = b45 self.blocks[45] = b45 self.sync_blocks([b45], success=False, reject_reason='bad-cb-missing', reconnect=True) self.log.info("Reject a block with no transactions") self.move_tip(44) b46 = CBlock() b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1 self.tip = b46 assert 46 not in self.blocks self.blocks[46] = b46 self.sync_blocks([b46], success=False, reject_reason='bad-cb-missing', reconnect=True) self.log.info("Reject a block with invalid work") self.move_tip(44) b47 = self.next_block(47, solve=False) target = uint256_from_compact(b47.nBits) while b47.sha256 < target: b47.nNonce += 1 b47.rehash() self.sync_blocks([b47], False, request_block=False) self.log.info("Reject a block with a timestamp >2 hours in the future") self.move_tip(44) b48 = self.next_block(48, solve=False) b48.nTime = int(time.time()) + 60 * 60 * 3 b48.solve() self.sync_blocks([b48], False, request_block=False) self.log.info("Reject a block with invalid merkle hash") self.move_tip(44) b49 = self.next_block(49) b49.hashMerkleRoot += 1 b49.solve() self.sync_blocks([b49], success=False, reject_reason='bad-txnmrklroot', reconnect=True) self.log.info("Reject a block with incorrect POW limit") self.move_tip(44) b50 = self.next_block(50) b50.nBits = b50.nBits - 1 b50.solve() self.sync_blocks([b50], False, request_block=False, reconnect=True) self.log.info("Reject a block with two coinbase transactions") self.move_tip(44) b51 = self.next_block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) b51 = self.update_block(51, [cb2]) self.sync_blocks([b51], success=False, reject_reason='bad-tx-coinbase', reconnect=True) self.log.info("Reject a block with duplicate transactions") self.move_tip(44) b52 = self.next_block(52, spend=out[15]) b52 = self.update_block(52, [b52.vtx[1]]) self.sync_blocks([b52], success=False, reject_reason='tx-duplicate', reconnect=True) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) # self.move_tip(43) b53 = self.next_block(53, spend=out[14]) self.sync_blocks([b53], False) self.save_spendable_output() self.log.info("Reject a block with timestamp before MedianTimePast") b54 = self.next_block(54, spend=out[15]) b54.nTime = b35.nTime - 1 b54.solve() self.sync_blocks([b54], False, request_block=False) # valid timestamp self.move_tip(53) b55 = self.next_block(55, spend=out[15]) b55.nTime = b35.nTime self.update_block(55, []) self.sync_blocks([b55], True) self.save_spendable_output() # Test Merkle tree malleability # # -> b42 (12) -> b43 (13) -> b53 (14) 
-> b55 (15) -> b57p2 (16) # \-> b57 (16) # \-> b56p2 (16) # \-> b56 (16) # # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without # affecting the merkle root of a block, while still invalidating it. # See: src/consensus/merkle.h # # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx. # Result: OK # # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle # root but duplicate transactions. # Result: Fails # # b57p2 has six transactions in its merkle tree: # - coinbase, tx, tx1, tx2, tx3, tx4 # Merkle root calculation will duplicate as necessary. # Result: OK. # # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates # that the error was caught early, avoiding a DOS vulnerability.) # b57 - a good block with 2 txs, don't submit until end self.move_tip(55) b57 = self.next_block(57) tx = self.create_and_sign_transaction(out[16], 1) tx1 = self.create_tx(tx, 0, 1) b57 = self.update_block(57, [tx, tx1]) # b56 - copy b57, add a duplicate tx self.log.info( "Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)") self.move_tip(55) b56 = copy.deepcopy(b57) self.blocks[56] = b56 assert_equal(len(b56.vtx), 3) b56 = self.update_block(56, [b57.vtx[2]]) assert_equal(b56.hash, b57.hash) self.sync_blocks([b56], success=False, reject_reason='bad-txns-duplicate', reconnect=True) # b57p2 - a good block with 6 tx'es, don't submit until end self.move_tip(55) b57p2 = self.next_block("57p2") tx = self.create_and_sign_transaction(out[16], 1) tx1 = self.create_tx(tx, 0, 1) tx2 = self.create_tx(tx1, 0, 1) tx3 = self.create_tx(tx2, 0, 1) tx4 = self.create_tx(tx3, 0, 1) b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4]) # b56p2 - copy b57p2, duplicate two non-consecutive tx's self.log.info( "Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)") self.move_tip(55) b56p2 = copy.deepcopy(b57p2) self.blocks["b56p2"] = b56p2 assert_equal(len(b56p2.vtx), 6) b56p2 = self.update_block("b56p2", b56p2.vtx[4:6], reorder=False) assert_equal(b56p2.hash, b57p2.hash) self.sync_blocks([b56p2], success=False, reject_reason='bad-txns-duplicate', reconnect=True) self.move_tip("57p2") self.sync_blocks([b57p2], True) self.move_tip(57) # The tip is not updated because 57p2 seen first self.sync_blocks([b57], False) self.save_spendable_output() # Test a few invalid tx types # # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> ??? 
(17) # # tx with prevout.n out of range self.log.info( "Reject a block with a transaction with prevout.n out of range") self.move_tip(57) b58 = self.next_block(58, spend=out[17]) tx = CTransaction() assert(len(out[17].vout) < 42) tx.vin.append( CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) pad_tx(tx) tx.calc_sha256() b58 = self.update_block(58, [tx]) self.sync_blocks([b58], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # tx with output value > input value self.log.info( "Reject a block with a transaction with outputs > inputs") self.move_tip(57) b59 = self.next_block(59) tx = self.create_and_sign_transaction(out[17], 51 * COIN) b59 = self.update_block(59, [tx]) self.sync_blocks([b59], success=False, reject_reason='bad-txns-in-belowout', reconnect=True) # reset to good chain self.move_tip(57) b60 = self.next_block(60, spend=out[17]) self.sync_blocks([b60], True) self.save_spendable_output() # Test BIP30 # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b61 (18) # # Blocks are not allowed to contain a transaction whose id matches that of an earlier, # not-fully-spent transaction in the same chain. To test, make identical coinbases; # the second one should be rejected. # self.log.info( "Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)") self.move_tip(60) b61 = self.next_block(61, spend=out[18]) # Equalize the coinbases b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig b61.vtx[0].rehash() b61 = self.update_block(61, []) assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) self.sync_blocks([b61], success=False, reject_reason='bad-txns-BIP30', reconnect=True) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b62 (18) # self.log.info( "Reject a block with a transaction with a nonfinal locktime") self.move_tip(60) b62 = self.next_block(62) tx = CTransaction() tx.nLockTime = 0xffffffff # this locktime is non-final # don't set nSequence tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert tx.vin[0].nSequence < 0xffffffff tx.calc_sha256() b62 = self.update_block(62, [tx]) self.sync_blocks([b62], success=False, reject_reason='bad-txns-nonfinal') # Test a non-final coinbase is also rejected # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b63 (-) # self.log.info( "Reject a block with a coinbase transaction with a nonfinal locktime") self.move_tip(60) b63 = self.next_block(63) b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() b63 = self.update_block(63, []) self.sync_blocks([b63], success=False, reject_reason='bad-txns-nonfinal') # This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > LEGACY_MAX_BLOCK_SIZE with the bloated varint, but <= LEGACY_MAX_BLOCK_SIZE without the bloated varint, # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. 
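
The "bloated VARINT" in question is the CompactSize prefix for the block's transaction count: CBrokenBlock (defined near the top of this file) serialises that count with a full-width 9-byte encoding (0xff marker plus a 64-bit value) instead of the shortest form, which is why b64a comes out exactly 8 bytes over LEGACY_MAX_BLOCK_SIZE. A sketch of the two encodings (helper names are illustrative):

    import struct

    def compact_size_canonical(n):
        # Canonical CompactSize: use the shortest encoding that fits the value.
        if n < 253:
            return struct.pack("<B", n)
        if n <= 0xffff:
            return b'\xfd' + struct.pack("<H", n)
        if n <= 0xffffffff:
            return b'\xfe' + struct.pack("<I", n)
        return b'\xff' + struct.pack("<Q", n)

    def compact_size_bloated(n):
        # Non-canonical: always spend the full 9 bytes, regardless of the value.
        return b'\xff' + struct.pack("<Q", n)

    # For a small tx count the bloated form is 8 bytes longer than the
    # canonical one, matching the LEGACY_MAX_BLOCK_SIZE + 8 assertion on b64a.
    assert len(compact_size_bloated(3)) - len(compact_size_canonical(3)) == 8
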
# # What matters is that the receiving node should not reject the bloated block, and then reject the canonical # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) # \ # b64a (18) # b64a is a bloated block (non-canonical varint) # b64 is a good block (same as b64 but w/ canonical varint) # self.log.info( "Accept a valid block even if a bloated version of the block has previously been sent") self.move_tip(60) regular_block = self.next_block("64a", spend=out[18]) # make it a "broken_block," with non-canonical serialization b64a = CBrokenBlock(regular_block) b64a.initialize(regular_block) self.blocks["64a"] = b64a self.tip = b64a tx = CTransaction() # use canonical serialization to calculate size script_length = LEGACY_MAX_BLOCK_SIZE - \ len(b64a.normal_serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) b64a = self.update_block("64a", [tx]) assert_equal(len(b64a.serialize()), LEGACY_MAX_BLOCK_SIZE + 8) self.sync_blocks([b64a], success=False, - reject_reason='non-canonical ReadCompactSize(): iostream error') + reject_reason='non-canonical ReadCompactSize():') # bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently # resend the header message, it won't send us the getdata message again. Just # disconnect and reconnect and then call sync_blocks. # TODO: improve this test to be less dependent on P2P DOS behaviour. node.disconnect_p2ps() self.reconnect_p2p() self.move_tip(60) b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) assert_equal(len(b64.serialize()), LEGACY_MAX_BLOCK_SIZE) self.blocks[64] = b64 b64 = self.update_block(64, []) self.sync_blocks([b64], True) self.save_spendable_output() # Spend an output created in the block itself # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # self.log.info( "Accept a block with a transaction spending an output created in the same block") self.move_tip(64) b65 = self.next_block(65) tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue) tx2 = self.create_and_sign_transaction(tx1, 0) b65 = self.update_block(65, [tx1, tx2]) self.sync_blocks([b65], True) self.save_spendable_output() # Attempt to double-spend a transaction created in a block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b67 (20) # # self.log.info( "Reject a block with a transaction double spending a transaction created in the same block") self.move_tip(65) b67 = self.next_block(67) tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue) tx2 = self.create_and_sign_transaction(tx1, 1) tx3 = self.create_and_sign_transaction(tx1, 2) b67 = self.update_block(67, [tx1, tx2, tx3]) self.sync_blocks([b67], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # More tests of block subsidy # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b68 (20) # # b68 - coinbase with an extra 10 satoshis, # creates a tx that has 9 satoshis from out[20] go to fees # this fails because the coinbase is trying to claim 1 satoshi too much in fees # # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee # this succeeds # self.log.info( "Reject a block trying to 
claim too much subsidy in the coinbase transaction") self.move_tip(65) b68 = self.next_block(68, additional_coinbase_value=10) tx = self.create_and_sign_transaction( out[20], out[20].vout[0].nValue - 9) b68 = self.update_block(68, [tx]) self.sync_blocks([b68], success=False, reject_reason='bad-cb-amount', reconnect=True) self.log.info( "Accept a block claiming the correct subsidy in the coinbase transaction") self.move_tip(65) b69 = self.next_block(69, additional_coinbase_value=10) tx = self.create_and_sign_transaction( out[20], out[20].vout[0].nValue - 10) self.update_block(69, [tx]) self.sync_blocks([b69], True) self.save_spendable_output() # Test spending the outpoint of a non-existent transaction # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b70 (21) # self.log.info( "Reject a block containing a transaction spending from a non-existent input") self.move_tip(69) b70 = self.next_block(70, spend=out[21]) bogus_tx = CTransaction() bogus_tx.sha256 = uint256_from_str( b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c") tx = CTransaction() tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) tx.vout.append(CTxOut(1, b"")) pad_tx(tx) b70 = self.update_block(70, [tx]) self.sync_blocks([b70], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b71 (21) # # b72 is a good block. # b71 is a copy of 72, but re-adds one of its transactions. However, # it has the same hash as b72. self.log.info( "Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability") self.move_tip(69) b72 = self.next_block(72) tx1 = self.create_and_sign_transaction(out[21], 2) tx2 = self.create_and_sign_transaction(tx1, 1) b72 = self.update_block(72, [tx1, tx2]) # now tip is 72 b71 = copy.deepcopy(b72) # add duplicate last transaction b71.vtx.append(b72.vtx[-1]) # b71 builds off b69 self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 self.blocks[71] = b71 assert_equal(len(b71.vtx), 4) assert_equal(len(b72.vtx), 3) assert_equal(b72.sha256, b71.sha256) self.move_tip(71) self.sync_blocks([b71], success=False, reject_reason='bad-txns-duplicate', reconnect=True) self.move_tip(72) self.sync_blocks([b72], True) self.save_spendable_output() self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py b75 = self.next_block(75) self.save_spendable_output() b76 = self.next_block(76) self.save_spendable_output() self.sync_blocks([b75, b76], True) # Test transaction resurrection # # -> b77 (24) -> b78 (25) -> b79 (26) # \-> b80 (25) -> b81 (26) -> b82 (27) # # b78 creates a tx, which is spent in b79. After b82, both should be in mempool # # The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the # rather obscure reason that the Python signature code does not distinguish between # Low-S and High-S values (whereas the bitcoin code has custom code which does so); # as a result of which, the odds are 50% that the python code will use the right # value and the transaction will be accepted into the mempool. Until we modify the # test framework to support low-S signing, we are out of luck. # # To get around this issue, we construct transactions which are not signed and which # spend to OP_TRUE. 
If the standard-ness rules change, this test would need to be # updated. (Perhaps to spend to a P2SH OP_TRUE script) self.log.info("Test transaction resurrection during a re-org") self.move_tip(76) b77 = self.next_block(77) tx77 = self.create_and_sign_transaction(out[24], 10 * COIN) b77 = self.update_block(77, [tx77]) self.sync_blocks([b77], True) self.save_spendable_output() b78 = self.next_block(78) tx78 = self.create_tx(tx77, 0, 9 * COIN) b78 = self.update_block(78, [tx78]) self.sync_blocks([b78], True) b79 = self.next_block(79) tx79 = self.create_tx(tx78, 0, 8 * COIN) b79 = self.update_block(79, [tx79]) self.sync_blocks([b79], True) # mempool should be empty assert_equal(len(self.nodes[0].getrawmempool()), 0) self.move_tip(77) b80 = self.next_block(80, spend=out[25]) self.sync_blocks([b80], False, request_block=False) self.save_spendable_output() b81 = self.next_block(81, spend=out[26]) # other chain is same length self.sync_blocks([b81], False, request_block=False) self.save_spendable_output() b82 = self.next_block(82, spend=out[27]) # now this chain is longer, triggers re-org self.sync_blocks([b82], True) self.save_spendable_output() # now check that tx78 and tx79 have been put back into the peer's # mempool mempool = self.nodes[0].getrawmempool() assert_equal(len(mempool), 2) assert tx78.hash in mempool assert tx79.hash in mempool # Test invalid opcodes in dead execution paths. # # -> b81 (26) -> b82 (27) -> b83 (28) # self.log.info( "Accept a block with invalid opcodes in dead execution paths") b83 = self.next_block(83) op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] script = CScript(op_codes) tx1 = self.create_and_sign_transaction( out[28], out[28].vout[0].nValue, script) tx2 = self.create_and_sign_transaction(tx1, 0, CScript([OP_TRUE])) tx2.vin[0].scriptSig = CScript([OP_FALSE]) tx2.rehash() b83 = self.update_block(83, [tx1, tx2]) self.sync_blocks([b83], True) self.save_spendable_output() # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) # # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) # \-> b85 (29) -> b86 (30) \-> b89a (32) # self.log.info("Test re-orging blocks with OP_RETURN in them") b84 = self.next_block(84) tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN])) vout_offset = len(tx1.vout) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.calc_sha256() self.sign_tx(tx1, out[29]) tx1.rehash() tx2 = self.create_tx(tx1, vout_offset, 0, CScript([OP_RETURN])) tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx3 = self.create_tx(tx1, vout_offset + 1, 0, CScript([OP_RETURN])) tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx4 = self.create_tx(tx1, vout_offset + 2, 0, CScript([OP_TRUE])) tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx5 = self.create_tx(tx1, vout_offset + 3, 0, CScript([OP_RETURN])) b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5]) self.sync_blocks([b84], True) self.save_spendable_output() self.move_tip(83) b85 = self.next_block(85, spend=out[29]) self.sync_blocks([b85], False) # other chain is same length b86 = self.next_block(86, spend=out[30]) self.sync_blocks([b86], True) self.move_tip(84) b87 = self.next_block(87, spend=out[30]) self.sync_blocks([b87], False) # other chain is same length self.save_spendable_output() b88 = self.next_block(88, spend=out[31]) self.sync_blocks([b88], True) self.save_spendable_output() # trying to spend the 
OP_RETURN output is rejected b89a = self.next_block("89a", spend=out[32]) tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE])) b89a = self.update_block("89a", [tx]) self.sync_blocks([b89a], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) self.log.info( "Test a re-org of one week's worth of blocks (1088 blocks)") self.move_tip(88) LARGE_REORG_SIZE = 1088 blocks = [] spend = out[32] for i in range(89, LARGE_REORG_SIZE + 89): b = self.next_block(i, spend) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) b = self.update_block(i, [tx]) assert_equal(len(b.serialize()), LEGACY_MAX_BLOCK_SIZE) blocks.append(b) self.save_spendable_output() spend = self.get_spendable_output() self.sync_blocks(blocks, True, timeout=960) chain1_tip = i # now create alt chain of same length self.move_tip(88) blocks2 = [] for i in range(89, LARGE_REORG_SIZE + 89): blocks2.append(self.next_block("alt" + str(i))) self.sync_blocks(blocks2, False, request_block=False) # extend alt chain to trigger re-org block = self.next_block("alt" + str(chain1_tip + 1)) self.sync_blocks([block], True, timeout=960) # ... and re-org back to the first chain self.move_tip(chain1_tip) block = self.next_block(chain1_tip + 1) self.sync_blocks([block], False, request_block=False) block = self.next_block(chain1_tip + 2) self.sync_blocks([block], True, timeout=960) # Helper methods ################ def add_transactions_to_block(self, block, tx_list): [tx.rehash() for tx in tx_list] block.vtx.extend(tx_list) # this is a little handier to use than the version in blocktools.py def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): return create_tx_with_script( spend_tx, n, amount=value, script_pub_key=script) # sign a transaction, using the key we know about # this signs input 0 in tx, which is assumed to be spending output n in # spend_tx def sign_tx(self, tx, spend_tx): scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend tx.vin[0].scriptSig = CScript() return sighash = SignatureHashForkId( spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[0].nValue) tx.vin[0].scriptSig = CScript( [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) def create_and_sign_transaction( self, spend_tx, value, script=CScript([OP_TRUE])): tx = self.create_tx(spend_tx, 0, value, script) self.sign_tx(tx, spend_tx) tx.rehash() return tx def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True): if self.tip is None: base_block_hash = self.genesis_hash block_time = int(time.time()) + 1 else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) coinbase.vout[0].nValue += additional_coinbase_value coinbase.rehash() if spend is None: block = create_block(base_block_hash, coinbase, block_time) else: # all but one satoshi to fees coinbase.vout[0].nValue += spend.vout[0].nValue - 1 coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) # spend 1 satoshi tx = self.create_tx(spend, 0, 1, script) self.sign_tx(tx, spend) self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() if solve: 
block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block # save the current tip so it can be spent by a later block def save_spendable_output(self): self.log.debug("saving spendable output {}".format(self.tip.vtx[0])) self.spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(self): self.log.debug("getting spendable output {}".format( self.spendable_outputs[0].vtx[0])) return self.spendable_outputs.pop(0).vtx[0] # move the tip back to a previous block def move_tip(self, number): self.tip = self.blocks[number] # adds transactions to the block and updates state def update_block(self, block_number, new_transactions, reorder=True): block = self.blocks[block_number] self.add_transactions_to_block(block, new_transactions) old_sha256 = block.sha256 if reorder: make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: self.block_heights[block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block def bootstrap_p2p(self): """Add a P2P connection to the node. Helper to connect and wait for version handshake.""" self.nodes[0].add_p2p_connection(P2PDataStore()) # We need to wait for the initial getheaders from the peer before we # start populating our blockstore. If we don't, then we may run ahead # to the next subtest before we receive the getheaders. We'd then send # an INV for the next block and receive two getheaders - one for the # IBD and one for the INV. We'd respond to both and could get # unexpectedly disconnected if the DoS score for that error is 50. self.nodes[0].p2p.wait_for_getheaders(timeout=5) def reconnect_p2p(self): """Tear down and bootstrap the P2P connection to the node. The node gets disconnected several times in this test. This helper method reconnects the p2p and restarts the network thread.""" self.nodes[0].disconnect_p2ps() self.bootstrap_p2p() def sync_blocks(self, blocks, success=True, reject_reason=None, request_block=True, reconnect=False, timeout=60): """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. Call with success = False if the tip shouldn't advance to the most recent block.""" self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, request_block=request_block, timeout=timeout, expect_disconnect=reconnect) if reconnect: self.reconnect_p2p() if __name__ == '__main__': FullBlockTest().main() diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index 5ba783c83..e8ec99232 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -1,197 +1,206 @@ #!/usr/bin/env python3 # Copyright (c) 2011 Jeff Garzik # # Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: # # Copyright (c) 2007 Jan-Klaas Kollhof # # This file is part of jsonrpc. # # jsonrpc is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. 
# # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """HTTP proxy for opening RPC connection to bitcoind. AuthServiceProxy has the following improvements over python-jsonrpc's ServiceProxy class: - HTTP connections persist for the life of the AuthServiceProxy object (if server supports HTTP/1.1) - sends protocol 'version', per JSON-RPC 1.1 - sends proper, incrementing 'id' - sends Basic HTTP authentication headers - parses all JSON numbers that look like floats as Decimal - uses standard Python json lib """ import base64 import decimal import http.client import json import logging +import os import socket import time import urllib.parse HTTP_TIMEOUT = 30 USER_AGENT = "AuthServiceProxy/0.1" log = logging.getLogger("BitcoinRPC") class JSONRPCException(Exception): def __init__(self, rpc_error): try: errmsg = '{} ({})'.format(rpc_error['message'], rpc_error['code']) except (KeyError, TypeError): errmsg = '' super().__init__(errmsg) self.error = rpc_error def EncodeDecimal(o): if isinstance(o, decimal.Decimal): return str(o) raise TypeError(repr(o) + " is not JSON serializable") class AuthServiceProxy(): __id_count = 0 # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): self.__service_url = service_url self._service_name = service_name self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests self.__url = urllib.parse.urlparse(service_url) - port = 80 if self.__url.port is None else self.__url.port user = None if self.__url.username is None else self.__url.username.encode( 'utf8') passwd = None if self.__url.password is None else self.__url.password.encode( 'utf8') authpair = user + b':' + passwd self.__auth_header = b'Basic ' + base64.b64encode(authpair) - - if connection: - # Callables re-use the connection of the original proxy - self.__conn = connection - elif self.__url.scheme == 'https': - self.__conn = http.client.HTTPSConnection( - self.__url.hostname, port, timeout=timeout) - else: - self.__conn = http.client.HTTPConnection( - self.__url.hostname, port, timeout=timeout) + self.timeout = timeout + self._set_conn(connection) def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): # Python internal stuff raise AttributeError if self._service_name is not None: name = "{}.{}".format(self._service_name, name) return AuthServiceProxy( self.__service_url, name, connection=self.__conn) def _request(self, method, path, postdata): ''' Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. 
''' headers = {'Host': self.__url.hostname, 'User-Agent': USER_AGENT, 'Authorization': self.__auth_header, 'Content-type': 'application/json'} + if os.name == 'nt': + # Windows somehow does not like to re-use connections + # TODO: Find out why the connection would disconnect occasionally + # and make it reusable on Windows + self._set_conn() try: self.__conn.request(method, path, postdata, headers) return self._get_response() except http.client.BadStatusLine as e: if e.line == "''": # if connection was closed, try again self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() else: raise except (BrokenPipeError, ConnectionResetError): # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset # ConnectionResetError happens on FreeBSD with Python 3.4 self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 log.debug("-{}-> {} {}".format( AuthServiceProxy.__id_count, self._service_name, json.dumps( args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) if args and argsn: raise ValueError( 'Cannot handle both named and positional arguments') return {'version': '1.1', 'method': self._service_name, 'params': args or argsn, 'id': AuthServiceProxy.__id_count} def __call__(self, *args, **argsn): postdata = json.dumps(self.get_request( *args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) response = self._request( 'POST', self.__url.path, postdata.encode('utf-8')) if response['error'] is not None: raise JSONRPCException(response['error']) elif 'result' not in response: raise JSONRPCException({ 'code': -343, 'message': 'missing JSON-RPC result'}) else: return response['result'] def batch(self, rpc_call_list): postdata = json.dumps( list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) log.debug("--> " + postdata) return self._request('POST', self.__url.path, postdata.encode('utf-8')) def _get_response(self): req_start_time = time.time() try: http_response = self.__conn.getresponse() except socket.timeout: raise JSONRPCException({ 'code': -344, 'message': '{!r} RPC took longer than {} seconds. 
Consider ' 'using larger timeout for calls that take ' 'longer to return.'.format(self._service_name, self.__conn.timeout)}) if http_response is None: raise JSONRPCException({ 'code': -342, 'message': 'missing HTTP response from server'}) content_type = http_response.getheader('Content-Type') if content_type != 'application/json': raise JSONRPCException({ 'code': -342, 'message': 'non-JSON HTTP response with \'{} {}\' from server'.format( http_response.status, http_response.reason)}) responsedata = http_response.read().decode('utf8') response = json.loads(responsedata, parse_float=decimal.Decimal) elapsed = time.time() - req_start_time if "error" in response and response["error"] is None: log.debug("<-{}- [{:.6f}] {}".format(response["id"], elapsed, json.dumps( response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) else: log.debug("<-- [{:.6f}] {}".format(elapsed, responsedata)) return response def __truediv__(self, relative_uri): return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn) + + def _set_conn(self, connection=None): + port = 80 if self.__url.port is None else self.__url.port + if connection: + self.__conn = connection + self.timeout = connection.timeout + elif self.__url.scheme == 'https': + self.__conn = http.client.HTTPSConnection( + self.__url.hostname, port, timeout=self.timeout) + else: + self.__conn = http.client.HTTPConnection( + self.__url.hostname, port, timeout=self.timeout) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 56c342296..ba70cf08a 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -1,840 +1,871 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. Functional tests are disabled on Windows by default. Use --force to run them anyway. For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`. """ import argparse from collections import deque import configparser import datetime import os import time import shutil import sys import subprocess import tempfile import re import logging import xml.etree.ElementTree as ET import json import threading import multiprocessing from queue import Queue, Empty # Formatting. Default colors to empty strings. 
-BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") +BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) TICK = "✓ " CROSS = "✖ " CIRCLE = "○ " except UnicodeDecodeError: TICK = "P " CROSS = "x " CIRCLE = "o " -if os.name == 'posix': +if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393): + if os.name == 'nt': + import ctypes + kernel32 = ctypes.windll.kernel32 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + # Enable ascii color control to stdout + stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE) + stdout_mode = ctypes.c_int32() + kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode)) + kernel32.SetConsoleMode( + stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) + # Enable ascii color control to stderr + stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE) + stderr_mode = ctypes.c_int32() + kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode)) + kernel32.SetConsoleMode( + stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) # primitive formatting on supported # terminal via ANSI escape sequences: BOLD = ('\033[0m', '\033[1m') - BLUE = ('\033[0m', '\033[0;34m') + GREEN = ('\033[0m', '\033[0;32m') RED = ('\033[0m', '\033[0;31m') GREY = ('\033[0m', '\033[1;30m') TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 NON_SCRIPTS = [ # These are python files that live in the functional tests directory, but # are not test scripts. "combine_logs.py", "create_cache.py", "test_runner.py", ] TEST_PARAMS = { # Some test can be run with additional parameters. # When a test is listed here, the it will be run without parameters # as well as with additional parameters listed here. # This: # example "testName" : [["--param1", "--param2"] , ["--param3"]] # will run the test 3 times: # testName # testName --param1 --param2 # testname --param3 "wallet_txn_doublespend.py": [["--mineblock"]], "wallet_txn_clone.py": [["--mineblock"]], "wallet_createwallet.py": [["--usecli"]], "wallet_multiwallet.py": [["--usecli"]], } # Used to limit the number of tests, when list of tests is not provided on command line # When --extended is specified, we run all tests, otherwise # we only run a test if its execution time in seconds does not exceed # EXTENDED_CUTOFF DEFAULT_EXTENDED_CUTOFF = 40 DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1 class TestCase(): """ Data structure to hold and run information necessary to launch a test case. 
""" def __init__(self, test_num, test_case, tests_dir, tmpdir, failfast_event, flags=None): self.tests_dir = tests_dir self.tmpdir = tmpdir self.test_case = test_case self.test_num = test_num self.failfast_event = failfast_event self.flags = flags def run(self, portseed_offset): if self.failfast_event.is_set(): return TestResult(self.test_num, self.test_case, "", "Skipped", 0, "", "") portseed = self.test_num + portseed_offset portseed_arg = ["--portseed={}".format(portseed)] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) test_argv = self.test_case.split() testdir = os.path.join("{}", "{}_{}").format( self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed) tmpdir_arg = ["--tmpdir={}".format(testdir)] time0 = time.time() process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg, universal_newlines=True, stdout=log_stdout, stderr=log_stderr) process.wait() log_stdout.seek(0), log_stderr.seek(0) [stdout, stderr] = [l.read().decode('utf-8') for l in (log_stdout, log_stderr)] log_stdout.close(), log_stderr.close() if process.returncode == TEST_EXIT_PASSED and stderr == "": status = "Passed" elif process.returncode == TEST_EXIT_SKIPPED: status = "Skipped" else: status = "Failed" return TestResult(self.test_num, self.test_case, testdir, status, int(time.time() - time0), stdout, stderr) def on_ci(): return os.getenv('TRAVIS') == 'true' or os.getenv( 'TEAMCITY_VERSION') is not None def main(): # Read config generated by configure. config = configparser.ConfigParser() configfile = os.path.join(os.path.abspath( os.path.dirname(__file__)), "..", "config.ini") config.read_file(open(configfile, encoding="utf8")) src_dir = config["environment"]["SRCDIR"] build_dir = config["environment"]["BUILDDIR"] tests_dir = os.path.join(src_dir, 'test', 'functional') # Parse arguments and pass through unrecognised args parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__, epilog=''' Help text and arguments for individual test script:''', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.') parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') parser.add_argument( '--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.') parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF, help='set the cutoff runtime for what tests get run') parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).') parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit') parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS, help='how many test scripts to run in parallel.') parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. 
--keepcache retains the cache from the previous testrun.') parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs') parser.add_argument('--tmpdirprefix', '-t', default=os.path.join(build_dir, 'test', 'tmp'), help="Root directory for datadirs") parser.add_argument( '--failfast', action='store_true', help='stop execution after the first test failure') parser.add_argument('--junitoutput', '-J', default='junit_results.xml', help="File that will store JUnit formatted test results. If no absolute path is given it is treated as relative to the temporary directory.") parser.add_argument('--testsuitename', '-n', default='Bitcoin ABC functional tests', help="Name of the test suite, as it will appear in the logs and in the JUnit report.") args, unknown_args = parser.parse_known_args() # args to be passed on always start with two dashes; tests are the # remaining unknown args tests = [arg for arg in unknown_args if arg[:2] != "--"] passon_args = [arg for arg in unknown_args if arg[:2] == "--"] passon_args.append("--configfile={}".format(configfile)) # Set up logging logging_level = logging.INFO if args.quiet else logging.DEBUG logging.basicConfig(format='%(message)s', level=logging_level) logging.info("Starting {}".format(args.testsuitename)) # Create base test directory tmpdir = os.path.join("{}", "test_runner_₿_🏃_{:%Y%m%d_%H%M%S}").format( args.tmpdirprefix, datetime.datetime.now()) + + # If we fixed the command-line and filename encoding issue on Windows, + # these two lines could be removed + if config["environment"]["EXEEXT"] == ".exe": + tmpdir = os.path.join("{}", "test_runner_{:%Y%m%d_%H%M%S}").format( + args.tmpdirprefix, datetime.datetime.now()) + os.makedirs(tmpdir) logging.debug("Temporary test directory at {}".format(tmpdir)) if not os.path.isabs(args.junitoutput): args.junitoutput = os.path.join(tmpdir, args.junitoutput) enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND") if config["environment"]["EXEEXT"] == ".exe" and not args.force: # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964 print( "Tests currently disabled on Windows by default. Use --force option to enable") sys.exit(0) if not enable_bitcoind: print("No functional tests to run.") print("Rerun ./configure with --with-daemon and then make") sys.exit(0) # Build list of tests all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS) # Check all tests with parameters actually exist for test in TEST_PARAMS: if test not in all_scripts: print("ERROR: Test with parameter {} does not exist, check it has " "not been renamed or deleted".format(test)) sys.exit(1) if tests: # Individual tests have been specified. Run specified tests that exist # in the all_scripts list. Accept the name with or without .py # extension. individual_tests = [ re.sub(r"\.py$", "", t) + ".py" for t in tests if not t.endswith('*')] test_list = [] for t in individual_tests: if t in all_scripts: test_list.append(t) else: print("{}WARNING!{} Test '{}' not found in full test list.".format( BOLD[1], BOLD[0], t)) # Allow for wildcard at the end of the name, so a single input can # match multiple tests for test in tests: if test.endswith('*'): test_list.extend( [t for t in all_scripts if t.startswith(test[:-1])]) # do not cut off explicitly specified tests cutoff = sys.maxsize else: # No individual tests have been specified. 
# Run all tests that do not exceed test_list = all_scripts cutoff = args.cutoff if args.extended: cutoff = sys.maxsize # Remove the test cases that the user has explicitly asked to exclude. if args.exclude: tests_excl = [re.sub(r"\.py$", "", t) - + ".py" for t in args.exclude.split(',')] + + (".py" if ".py" not in t else "") for t in args.exclude.split(',')] for exclude_test in tests_excl: if exclude_test in test_list: test_list.remove(exclude_test) else: print("{}WARNING!{} Test '{}' not found in current test list.".format( BOLD[1], BOLD[0], exclude_test)) # Update timings from build_dir only if separate build directory is used. # We do not want to pollute source directory. build_timings = None if (src_dir != build_dir): build_timings = Timings(os.path.join(build_dir, 'timing.json')) # Always use timings from scr_dir if present src_timings = Timings(os.path.join( src_dir, "test", "functional", 'timing.json')) # Add test parameters and remove long running tests if needed test_list = get_tests_to_run( test_list, TEST_PARAMS, cutoff, src_timings) if not test_list: print("No valid test scripts specified. Check that your test is in one " "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") sys.exit(0) if args.help: # Print help for test_runner.py, then print help of the first script # and exit. parser.print_help() subprocess.check_call( [sys.executable, os.path.join(tests_dir, test_list[0]), '-h']) sys.exit(0) check_script_prefixes(all_scripts) if not args.keepcache: shutil.rmtree(os.path.join(build_dir, "test", "cache"), ignore_errors=True) run_tests( test_list, build_dir, tests_dir, args.junitoutput, tmpdir, num_jobs=args.jobs, test_suite_name=args.testsuitename, enable_coverage=args.coverage, args=passon_args, combined_logs_len=args.combinedlogslen, build_timings=build_timings, failfast=args.failfast ) def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name, enable_coverage=False, args=None, combined_logs_len=0, build_timings=None, failfast=False): args = args or [] # Warn if bitcoind is already running (unix only) try: pidofOutput = subprocess.check_output(["pidof", "bitcoind"]) if pidofOutput is not None and pidofOutput != b'': print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format( BOLD[1], BOLD[0])) except (OSError, subprocess.SubprocessError): pass # Warn if there is a cache directory cache_dir = os.path.join(build_dir, "test", "cache") if os.path.isdir(cache_dir): print("{}WARNING!{} There is a cache directory here: {}. 
If tests fail unexpectedly, try deleting the cache directory.".format( BOLD[1], BOLD[0], cache_dir)) flags = ['--cachedir={}'.format(cache_dir)] + args if enable_coverage: coverage = RPCCoverage() flags.append(coverage.flag) logging.debug( "Initializing coverage directory at {}".format(coverage.dir)) else: coverage = None if len(test_list) > 1 and num_jobs > 1: # Populate cache try: subprocess.check_output([sys.executable, os.path.join( tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)]) except subprocess.CalledProcessError as e: sys.stdout.buffer.write(e.output) raise # Run Tests time0 = time.time() test_results = execute_test_processes( num_jobs, test_list, tests_dir, tmpdir, flags, failfast) runtime = int(time.time() - time0) max_len_name = len(max(test_list, key=len)) print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len) save_results_as_junit(test_results, junitoutput, runtime, test_suite_name) if (build_timings is not None): build_timings.save_timings(test_results) if coverage: coverage_passed = coverage.report_rpc_coverage() logging.debug("Cleaning up coverage data") coverage.cleanup() else: coverage_passed = True # Clear up the temp directory if all subdirectories are gone if not os.listdir(tmpdir): os.rmdir(tmpdir) all_passed = all(map( lambda test_result: test_result.was_successful, test_results)) and coverage_passed sys.exit(not all_passed) def execute_test_processes( num_jobs, test_list, tests_dir, tmpdir, flags, failfast=False): update_queue = Queue() job_queue = Queue() failfast_event = threading.Event() test_results = [] poll_timeout = 10 # seconds # In case there is a graveyard of zombie bitcoinds, we can apply a # pseudorandom offset to hopefully jump over them. # (625 is PORT_RANGE/MAX_NODES) portseed_offset = int(time.time() * 1000) % 625 ## # Define some helper functions we will need for threading. ## def handle_message(message, running_jobs): """ handle_message handles a single message from handle_test_cases """ if isinstance(message, TestCase): running_jobs.append((message.test_num, message.test_case)) print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0])) return if isinstance(message, TestResult): test_result = message running_jobs.remove((test_result.num, test_result.name)) test_results.append(test_result) if test_result.status == "Passed": print("{}{}{} passed, Duration: {} s".format( BOLD[1], test_result.name, BOLD[0], test_result.time)) elif test_result.status == "Skipped": print("{}{}{} skipped".format( BOLD[1], test_result.name, BOLD[0])) else: print("{}{}{} failed, Duration: {} s\n".format( BOLD[1], test_result.name, BOLD[0], test_result.time)) print(BOLD[1] + 'stdout:' + BOLD[0]) print(test_result.stdout) print(BOLD[1] + 'stderr:' + BOLD[0]) print(test_result.stderr) if failfast: logging.debug("Early exiting after test failure") failfast_event.set() return assert False, "we should not be here" def handle_update_messages(): """ handle_update_messages waits for messages to be sent from handle_test_cases via the update_queue. It serializes the results so we can print nice status update messages. """ printed_status = False running_jobs = [] while True: message = None try: message = update_queue.get(True, poll_timeout) if message is None: break # We printed a status message, need to kick to the next line # before printing more. 
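To make the port seed arithmetic above concrete, here is a small illustrative sketch. The fixed timestamp and the helper name portseed_for are made up for the example, and 625 is simply the PORT_RANGE/MAX_NODES value quoted in the comment; the real runner derives the offset from time.time() at startup.

# Illustrative sketch of the seed arithmetic used above; the timestamp is
# fixed here only so the example is reproducible.
MAX_PORTSEED_OFFSETS = 625  # PORT_RANGE / MAX_NODES, per the comment above

def portseed_for(test_num, start_time_ms):
    # Every test in a run is shifted by the same pseudorandom offset, so a
    # stale bitcoind left over from an earlier run is unlikely to still be
    # sitting on the ports derived from the new seeds.
    offset = start_time_ms % MAX_PORTSEED_OFFSETS
    return test_num + offset

start_ms = 1_694_000_000_123  # example start time in milliseconds
seeds = [portseed_for(n, start_ms) for n in range(3)]
assert seeds == [123, 124, 125]  # consecutive tests get distinct seeds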
if printed_status: print() printed_status = False handle_message(message, running_jobs) update_queue.task_done() except Empty: if not on_ci(): print("Running jobs: {}".format( ", ".join([j[1] for j in running_jobs])), end="\r") sys.stdout.flush() printed_status = True def handle_test_cases(): """ job_runner represents a single thread that is part of a worker pool. It waits for a test, then executes that test. It also reports start and result messages to handle_update_messages """ while True: test = job_queue.get() if test is None: break # Signal that the test is starting to inform the poor waiting # programmer update_queue.put(test) result = test.run(portseed_offset) update_queue.put(result) job_queue.task_done() ## # Setup our threads, and start sending tasks ## # Start our result collection thread. resultCollector = threading.Thread(target=handle_update_messages) resultCollector.daemon = True resultCollector.start() # Start some worker threads for j in range(num_jobs): t = threading.Thread(target=handle_test_cases) t.daemon = True t.start() # Push all our test cases into the job queue. for i, t in enumerate(test_list): job_queue.put(TestCase(i, t, tests_dir, tmpdir, failfast_event, flags)) # Wait for all the jobs to be completed job_queue.join() # Wait for all the results to be compiled update_queue.join() # Flush our queues so the threads exit update_queue.put(None) for j in range(num_jobs): job_queue.put(None) return test_results def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len): results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format( "TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0] test_results.sort(key=TestResult.sort_key) all_passed = True time_sum = 0 for test_result in test_results: all_passed = all_passed and test_result.was_successful time_sum += test_result.time test_result.padding = max_len_name results += str(test_result) testdir = test_result.testdir if combined_logs_len and os.path.isdir(testdir): # Print the final `combinedlogslen` lines of the combined logs print('{}Combine the logs and print the last {} lines ...{}'.format( BOLD[1], combined_logs_len, BOLD[0])) print('\n============') print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0])) print('============\n') - combined_logs, _ = subprocess.Popen([sys.executable, os.path.join( - tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate() + combined_logs_args = [ + sys.executable, os.path.join( + tests_dir, 'combine_logs.py'), testdir] + + if BOLD[0]: + combined_logs_args += ['--color'] + combined_logs, _ = subprocess.Popen( + combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate() print( "\n".join( deque( combined_logs.splitlines(), combined_logs_len))) status = TICK + "Passed" if all_passed else CROSS + "Failed" if not all_passed: results += RED[1] results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format( "ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0] if not all_passed: results += RED[0] results += "Runtime: {} s\n".format(runtime) print(results) class TestResult(): """ Simple data structure to store test result values and print them properly """ def __init__(self, num, name, testdir, status, time, stdout, stderr): self.num = num self.name = name self.testdir = testdir self.status = status self.time = time self.padding = 0 self.stdout = stdout self.stderr = stderr def sort_key(self): if self.status == "Passed": return 0, self.name.lower() elif self.status == 
"Failed": return 2, self.name.lower() elif self.status == "Skipped": return 1, self.name.lower() def __repr__(self): if self.status == "Passed": - color = BLUE + color = GREEN glyph = TICK elif self.status == "Failed": color = RED glyph = CROSS elif self.status == "Skipped": color = GREY glyph = CIRCLE return color[1] + "{} | {}{} | {} s\n".format( self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0] @property def was_successful(self): return self.status != "Failed" def get_all_scripts_from_disk(test_dir, non_scripts): """ Return all available test script from script directory (excluding NON_SCRIPTS) """ python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"]) return list(python_files - set(non_scripts)) def check_script_prefixes(all_scripts): """Check that no more than `EXPECTED_VIOLATION_COUNT` of the test scripts don't start with one of the allowed name prefixes.""" EXPECTED_VIOLATION_COUNT = 29 # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming # convention don't immediately cause the tests to fail. LEEWAY = 10 good_prefixes_re = re.compile( "(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_") bad_script_names = [ script for script in all_scripts if good_prefixes_re.match(script) is None] if len(bad_script_names) < EXPECTED_VIOLATION_COUNT: print( "{}HURRAY!{} Number of functional tests violating naming convention reduced!".format( BOLD[1], BOLD[0])) print("Consider reducing EXPECTED_VIOLATION_COUNT from {} to {}".format( EXPECTED_VIOLATION_COUNT, len(bad_script_names))) elif len(bad_script_names) > EXPECTED_VIOLATION_COUNT: print( "INFO: {} tests not meeting naming conventions (expected {}):".format(len(bad_script_names), EXPECTED_VIOLATION_COUNT)) print(" {}".format("\n ".join(sorted(bad_script_names)))) assert len(bad_script_names) <= EXPECTED_VIOLATION_COUNT + \ LEEWAY, "Too many tests not following naming convention! ({} found, expected: <= {})".format( len(bad_script_names), EXPECTED_VIOLATION_COUNT) def get_tests_to_run(test_list, test_params, cutoff, src_timings): """ Returns only test that will not run longer that cutoff. Long running tests are returned first to favor running tests in parallel Timings from build directory override those from src directory """ def get_test_time(test): # Return 0 if test is unknown to always run it return next( (x['time'] for x in src_timings.existing_timings if x['name'] == test), 0) # Some tests must also be run with additional parameters. Add them to the # list. tests_with_params = [] for test_name in test_list: # always execute a test without parameters tests_with_params.append(test_name) params = test_params.get(test_name) if params is not None: tests_with_params.extend( [test_name + " " + " ".join(p) for p in params]) result = [t for t in tests_with_params if get_test_time(t) <= cutoff] result.sort(key=lambda x: (-get_test_time(x), x)) return result class RPCCoverage(): """ Coverage reporting utilities for test_runner. Coverage calculation works by having each test script subprocess write coverage files into a particular directory. These files contain the RPC commands invoked during testing, as well as a complete listing of RPC commands per `bitcoin-cli help` (`rpc_interface.txt`). After all tests complete, the commands run are combined and diff'd against the complete list to calculate uncovered RPC commands. 
See also: test/functional/test_framework/coverage.py """ def __init__(self): self.dir = tempfile.mkdtemp(prefix="coverage") self.flag = '--coveragedir={}'.format(self.dir) def report_rpc_coverage(self): """ Print out RPC commands that were unexercised by tests. """ uncovered = self._get_uncovered_rpc_commands() if uncovered: print("Uncovered RPC commands:") print("".join((" - {}\n".format(i)) for i in sorted(uncovered))) return False else: print("All RPC commands covered.") return True def cleanup(self): return shutil.rmtree(self.dir) def _get_uncovered_rpc_commands(self): """ Return a set of currently untested RPC commands. """ # This is shared from `test/functional/test-framework/coverage.py` reference_filename = 'rpc_interface.txt' coverage_file_prefix = 'coverage.' coverage_ref_filename = os.path.join(self.dir, reference_filename) coverage_filenames = set() all_cmds = set() covered_cmds = set() if not os.path.isfile(coverage_ref_filename): raise RuntimeError("No coverage reference found") with open(coverage_ref_filename, 'r', encoding="utf8") as f: all_cmds.update([i.strip() for i in f.readlines()]) for root, dirs, files in os.walk(self.dir): for filename in files: if filename.startswith(coverage_file_prefix): coverage_filenames.add(os.path.join(root, filename)) for filename in coverage_filenames: with open(filename, 'r', encoding="utf8") as f: covered_cmds.update([i.strip() for i in f.readlines()]) return all_cmds - covered_cmds def save_results_as_junit(test_results, file_name, time, test_suite_name): """ Save tests results to file in JUnit format See http://llg.cubic.org/docs/junit/ for specification of format """ e_test_suite = ET.Element("testsuite", {"name": "{}".format(test_suite_name), "tests": str(len(test_results)), # "errors": "failures": str(len([t for t in test_results if t.status == "Failed"])), "id": "0", "skipped": str(len([t for t in test_results if t.status == "Skipped"])), "time": str(time), "timestamp": datetime.datetime.now().isoformat('T') }) for test_result in test_results: e_test_case = ET.SubElement(e_test_suite, "testcase", {"name": test_result.name, "classname": test_result.name, "time": str(test_result.time) } ) if test_result.status == "Skipped": ET.SubElement(e_test_case, "skipped") elif test_result.status == "Failed": ET.SubElement(e_test_case, "failure") # no special element for passed tests ET.SubElement(e_test_case, "system-out").text = test_result.stdout ET.SubElement(e_test_case, "system-err").text = test_result.stderr ET.ElementTree(e_test_suite).write( file_name, "UTF-8", xml_declaration=True) class Timings(): """ Takes care of loading, merging and saving tests execution times. 
""" def __init__(self, timing_file): self.timing_file = timing_file self.existing_timings = self.load_timings() def load_timings(self): if os.path.isfile(self.timing_file): with open(self.timing_file, encoding="utf8") as f: return json.load(f) else: return [] def get_merged_timings(self, new_timings): """ Return new list containing existing timings updated with new timings Tests that do not exists are not removed """ key = 'name' merged = {} for item in self.existing_timings + new_timings: if item[key] in merged: merged[item[key]].update(item) else: merged[item[key]] = item # Sort the result to preserve test ordering in file merged = list(merged.values()) merged.sort(key=lambda t, key=key: t[key]) return merged def save_timings(self, test_results): # we only save test that have passed - timings for failed test might be # wrong (timeouts or early fails) passed_results = [t for t in test_results if t.status == 'Passed'] new_timings = list(map(lambda t: {'name': t.name, 'time': t.time}, passed_results)) merged_timings = self.get_merged_timings(new_timings) with open(self.timing_file, 'w', encoding="utf8") as f: json.dump(merged_timings, f, indent=True) if __name__ == '__main__': main() diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index 011c88613..cd72f2df2 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -1,400 +1,405 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test multiwallet. Verify that a bitcoind node can load multiple wallet files """ import os import shutil from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ErrorMatch from test_framework.util import ( assert_equal, assert_raises_rpc_error, ) class MultiWalletTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.supports_cli = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): node = self.nodes[0] def data_dir(*p): return os.path.join(node.datadir, 'regtest', *p) def wallet_dir(*p): return data_dir('wallets', *p) def wallet(name): return node.get_wallet_rpc(name) def wallet_file(name): if os.path.isdir(wallet_dir(name)): return wallet_dir(name, "wallet.dat") return wallet_dir(name) assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': ''}]}) # check wallet.dat is created self.stop_nodes() assert_equal(os.path.isfile(wallet_dir('wallet.dat')), True) # create symlink to verify wallet directory path can be referenced # through symlink - os.mkdir(wallet_dir('w7')) - os.symlink('w7', wallet_dir('w7_symlink')) + if os.name != 'nt': + os.mkdir(wallet_dir('w7')) + os.symlink('w7', wallet_dir('w7_symlink')) # rename wallet.dat to make sure plain wallet file paths (as opposed to # directory paths) can be loaded os.rename(wallet_dir("wallet.dat"), wallet_dir("w8")) # create another dummy wallet for use in testing backups later self.start_node(0, []) self.stop_nodes() empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat') os.rename(wallet_dir("wallet.dat"), empty_wallet) # restart node with a mix of wallet names: # w1, w2, w3 - to verify new wallets created when non-existing paths specified # w - to verify wallet name matching works when one wallet path is prefix of another # sub/w5 - to verify relative wallet path is 
created correctly # extern/w6 - to verify absolute wallet path is created correctly # w7_symlink - to verify symlinked wallet path is initialized correctly # w8 - to verify existing wallet file is loaded correctly # '' - to verify default wallet file is created correctly wallet_names = ['w1', 'w2', 'w3', 'w', 'sub/w5', os.path.join(self.options.tmpdir, 'extern/w6'), 'w7_symlink', 'w8', ''] + if os.name == 'nt': + wallet_names.remove('w7_symlink') extra_args = ['-wallet={}'.format(n) for n in wallet_names] self.start_node(0, extra_args) assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), [ '', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8']) assert_equal(set(node.listwallets()), set(wallet_names)) # check that all requested wallets were created self.stop_node(0) for wallet_name in wallet_names: assert_equal(os.path.isfile(wallet_file(wallet_name)), True) # should not initialize if wallet path can't be created - exp_stderr = "boost::filesystem::create_directory: (The system cannot find the path specified|Not a directory):" + exp_stderr = "boost::filesystem::create_directory:" self.nodes[0].assert_start_raises_init_error( ['-wallet=wallet.dat/bad'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) self.nodes[0].assert_start_raises_init_error( ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist') self.nodes[0].assert_start_raises_init_error( ['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir()) self.nodes[0].assert_start_raises_init_error( ['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir()) # should not initialize if there are duplicate wallets self.nodes[0].assert_start_raises_init_error( ['-wallet=w1', '-wallet=w1'], 'Error: Error loading wallet w1. Duplicate -wallet filename specified.') # should not initialize if one wallet is a copy of another shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy')) exp_stderr = r"BerkeleyBatch: Can't open database w8_copy \(duplicates fileid \w+ from w8\)" self.nodes[0].assert_start_raises_init_error( ['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) # should not initialize if wallet file is a symlink - os.symlink('w8', wallet_dir('w8_symlink')) - self.nodes[0].assert_start_raises_init_error( - ['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX) + if os.name != 'nt': + os.symlink('w8', wallet_dir('w8_symlink')) + self.nodes[0].assert_start_raises_init_error( + ['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. 
.*', match=ErrorMatch.FULL_REGEX) # should not initialize if the specified walletdir does not exist self.nodes[0].assert_start_raises_init_error( ['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist') # should not initialize if the specified walletdir is not a directory not_a_dir = wallet_dir('notadir') open(not_a_dir, 'a', encoding="utf8").close() self.nodes[0].assert_start_raises_init_error( ['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory') self.log.info("Do not allow -zapwallettxes with multiwallet") self.nodes[0].assert_start_raises_init_error( [ '-zapwallettxes', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file") self.nodes[0].assert_start_raises_init_error( [ '-zapwallettxes=1', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file") self.nodes[0].assert_start_raises_init_error( [ '-zapwallettxes=2', '-wallet=w1', '-wallet=w2'], "Error: -zapwallettxes is only allowed with a single wallet file") self.log.info("Do not allow -salvagewallet with multiwallet") self.nodes[0].assert_start_raises_init_error( [ '-salvagewallet', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file") self.nodes[0].assert_start_raises_init_error( [ '-salvagewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -salvagewallet is only allowed with a single wallet file") self.log.info("Do not allow -upgradewallet with multiwallet") self.nodes[0].assert_start_raises_init_error( [ '-upgradewallet', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file") self.nodes[0].assert_start_raises_init_error( [ '-upgradewallet=1', '-wallet=w1', '-wallet=w2'], "Error: -upgradewallet is only allowed with a single wallet file") # if wallets/ doesn't exist, datadir should be the default wallet dir wallet_dir2 = data_dir('walletdir') os.rename(wallet_dir(), wallet_dir2) self.start_node(0, ['-wallet=w4', '-wallet=w5']) assert_equal(set(node.listwallets()), {"w4", "w5"}) w5 = wallet("w5") w5.generate(1) # now if wallets/ exists again, but the rootdir is specified as the # walletdir, w4 and w5 should still be loaded os.rename(wallet_dir2, wallet_dir()) self.restart_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()]) assert_equal(set(node.listwallets()), {"w4", "w5"}) w5 = wallet("w5") w5_info = w5.getwalletinfo() assert_equal(w5_info['immature_balance'], 50) competing_wallet_dir = os.path.join( self.options.tmpdir, 'competing_walletdir') os.mkdir(competing_wallet_dir) self.restart_node(0, ['-walletdir=' + competing_wallet_dir]) exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\"!" 
self.nodes[1].assert_start_raises_init_error( ['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX) self.restart_node(0, extra_args) assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), [ '', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy']) wallets = [wallet(w) for w in wallet_names] wallet_bad = wallet("bad") # check wallet names and balances wallets[0].generate(1) for wallet_name, wallet in zip(wallet_names, wallets): info = wallet.getwalletinfo() assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0) assert_equal(info['walletname'], wallet_name) # accessing invalid wallet fails assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo) # accessing wallet RPC without using wallet endpoint fails assert_raises_rpc_error(-19, "Wallet file not specified (must request wallet RPC through /wallet/ uri-path).", node.getwalletinfo) w1, w2, w3, w4, *_ = wallets w1.generate(101) assert_equal(w1.getbalance(), 100) assert_equal(w2.getbalance(), 0) assert_equal(w3.getbalance(), 0) assert_equal(w4.getbalance(), 0) w1.sendtoaddress(w2.getnewaddress(), 1) w1.sendtoaddress(w3.getnewaddress(), 2) w1.sendtoaddress(w4.getnewaddress(), 3) w1.generate(1) assert_equal(w2.getbalance(), 1) assert_equal(w3.getbalance(), 2) assert_equal(w4.getbalance(), 3) batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()]) assert_equal(batch[0]["result"]["chain"], "regtest") assert_equal(batch[1]["result"]["walletname"], "w1") self.log.info('Check for per-wallet settxfee call') assert_equal(w1.getwalletinfo()['paytxfee'], 0) assert_equal(w2.getwalletinfo()['paytxfee'], 0) w2.settxfee(4.0) assert_equal(w1.getwalletinfo()['paytxfee'], 0) assert_equal(w2.getwalletinfo()['paytxfee'], 4.0) self.log.info("Test dynamic wallet loading") self.restart_node(0, ['-nowallet']) assert_equal(node.listwallets(), []) assert_raises_rpc_error(-32601, "Method not found", node.getwalletinfo) self.log.info("Load first wallet") loadwallet_name = node.loadwallet(wallet_names[0]) assert_equal(loadwallet_name['name'], wallet_names[0]) assert_equal(node.listwallets(), wallet_names[0:1]) node.getwalletinfo() w1 = node.get_wallet_rpc(wallet_names[0]) w1.getwalletinfo() self.log.info("Load second wallet") loadwallet_name = node.loadwallet(wallet_names[1]) assert_equal(loadwallet_name['name'], wallet_names[1]) assert_equal(node.listwallets(), wallet_names[0:2]) assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo) w2 = node.get_wallet_rpc(wallet_names[1]) w2.getwalletinfo() self.log.info("Load remaining wallets") for wallet_name in wallet_names[2:]: loadwallet_name = self.nodes[0].loadwallet(wallet_name) assert_equal(loadwallet_name['name'], wallet_name) assert_equal(set(self.nodes[0].listwallets()), set(wallet_names)) # Fail to load if wallet doesn't exist assert_raises_rpc_error(-18, 'Wallet wallets not found.', self.nodes[0].loadwallet, 'wallets') # Fail to load duplicate wallets assert_raises_rpc_error(-4, 'Wallet file verification failed: Error loading wallet w1. Duplicate -wallet filename specified.', self.nodes[0].loadwallet, wallet_names[0]) # Fail to load duplicate wallets by different ways (directory and # filepath) assert_raises_rpc_error(-4, "Wallet file verification failed: Error loading wallet wallet.dat. 
Duplicate -wallet filename specified.", self.nodes[0].loadwallet, 'wallet.dat') # Fail to load if one wallet is a copy of another assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy') # Fail to load if one wallet is a copy of another. # Test this twice to make sure that we don't re-introduce # https://github.com/bitcoin/bitcoin/issues/14304 assert_raises_rpc_error(-1, "BerkeleyBatch: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy') # Fail to load if wallet file is a symlink - assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'", - self.nodes[0].loadwallet, 'w8_symlink') + if os.name != 'nt': + assert_raises_rpc_error(-4, "Wallet file verification failed: Invalid -wallet path 'w8_symlink'", + self.nodes[0].loadwallet, 'w8_symlink') # Fail to load if a directory is specified that doesn't contain a # wallet os.mkdir(wallet_dir('empty_wallet_dir')) assert_raises_rpc_error(-18, "Directory empty_wallet_dir does not contain a wallet.dat file", self.nodes[0].loadwallet, 'empty_wallet_dir') self.log.info("Test dynamic wallet creation.") # Fail to create a wallet if it already exists. assert_raises_rpc_error(-4, "Wallet w2 already exists.", self.nodes[0].createwallet, 'w2') # Successfully create a wallet with a new name loadwallet_name = self.nodes[0].createwallet('w9') assert_equal(loadwallet_name['name'], 'w9') w9 = node.get_wallet_rpc('w9') assert_equal(w9.getwalletinfo()['walletname'], 'w9') assert 'w9' in self.nodes[0].listwallets() # Successfully create a wallet using a full path new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir') new_wallet_name = os.path.join(new_wallet_dir, 'w10') loadwallet_name = self.nodes[0].createwallet(new_wallet_name) assert_equal(loadwallet_name['name'], new_wallet_name) w10 = node.get_wallet_rpc(new_wallet_name) assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name) assert new_wallet_name in self.nodes[0].listwallets() self.log.info("Test dynamic wallet unloading") # Test `unloadwallet` errors assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet) assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy") assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet) assert_raises_rpc_error(-8, "Cannot unload the requested wallet", w1.unloadwallet, "w2"), # Successfully unload the specified wallet name self.nodes[0].unloadwallet("w1") assert 'w1' not in self.nodes[0].listwallets() # Successfully unload the wallet referenced by the request endpoint w2.unloadwallet() assert 'w2' not in self.nodes[0].listwallets() # Successfully unload all wallets for wallet_name in self.nodes[0].listwallets(): self.nodes[0].unloadwallet(wallet_name) assert_equal(self.nodes[0].listwallets(), []) assert_raises_rpc_error(-32601, "Method not found (wallet method is disabled because no wallet is loaded)", self.nodes[0].getwalletinfo) # Successfully load a previously unloaded wallet self.nodes[0].loadwallet('w1') assert_equal(self.nodes[0].listwallets(), ['w1']) assert_equal(w1.getwalletinfo()['walletname'], 'w1') assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), [ '', os.path.join('sub', 'w5'), 'w', 'w1', 'w2', 'w3', 'w7', 'w7_symlink', 'w8', 'w8_copy', 'w9']) # Test backing up and restoring wallets 
self.log.info("Test wallet backup") self.restart_node(0, ['-nowallet']) for wallet_name in wallet_names: self.nodes[0].loadwallet(wallet_name) for wallet_name in wallet_names: rpc = self.nodes[0].get_wallet_rpc(wallet_name) addr = rpc.getnewaddress() backup = os.path.join(self.options.tmpdir, 'backup.dat') rpc.backupwallet(backup) self.nodes[0].unloadwallet(wallet_name) shutil.copyfile(empty_wallet, wallet_file(wallet_name)) self.nodes[0].loadwallet(wallet_name) assert_equal(rpc.getaddressinfo(addr)['ismine'], False) self.nodes[0].unloadwallet(wallet_name) shutil.copyfile(backup, wallet_file(wallet_name)) self.nodes[0].loadwallet(wallet_name) assert_equal(rpc.getaddressinfo(addr)['ismine'], True) # Test .walletlock file is closed self.start_node(1) wallet = os.path.join(self.options.tmpdir, 'my_wallet') self.nodes[0].createwallet(wallet) assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet) self.nodes[0].unloadwallet(wallet) self.nodes[1].loadwallet(wallet) if __name__ == '__main__': MultiWalletTest().main()