diff --git a/test/functional/abc-transaction-ordering.py b/test/functional/abc-transaction-ordering.py index 43cba40a8..e282ee807 100755 --- a/test/functional/abc-transaction-ordering.py +++ b/test/functional/abc-transaction-ordering.py @@ -1,227 +1,227 @@ #!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This test checks that the node software accepts transactions in non-topological order once the feature is activated. """ import random import time from collections import deque from test_framework.blocktools import ( create_block, create_coinbase, make_conform_to_ctor, ) from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut from test_framework.p2p import P2PDataStore from test_framework.script import OP_RETURN, OP_TRUE, CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal -class PreviousSpendableOutput(): +class PreviousSpendableOutput: def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending class TransactionOrderingTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.block_heights = {} self.tip = None self.blocks = {} self.extra_args = [['-whitelist=noban@127.0.0.1']] def add_transactions_to_block(self, block, tx_list): [tx.rehash() for tx in tx_list] block.vtx.extend(tx_list) def next_block(self, number, spend=None, tx_count=0): if self.tip is None: base_block_hash = self.genesis_hash block_time = int(time.time()) + 1 else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height) coinbase.rehash() if spend is None: # We need to have something to spend to fill the block. block = create_block(base_block_hash, coinbase, block_time) else: # all but one satoshi to fees coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) # Make sure we have plenty enough to spend going forward. spendable_outputs = deque([spend]) def get_base_transaction(): # Create the new transaction tx = CTransaction() # Spend from one of the spendable outputs spend = spendable_outputs.popleft() tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n))) # Add spendable outputs for i in range(4): tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) spendable_outputs.append(PreviousSpendableOutput(tx, i)) # Put some random data into the transaction in order to randomize ids. # This also ensures that transactions are larger than 100 bytes. rand = random.getrandbits(256) tx.vout.append(CTxOut(0, CScript([rand, OP_RETURN]))) return tx tx = get_base_transaction() # Make it the same format as the transactions added for padding and save the size. # It's missing the padding output, so we add a constant to account # for it. tx.rehash() # Add the transaction to the block self.add_transactions_to_block(block, [tx]) # If we have a transaction count requirement, just fill the block # until we get there while len(block.vtx) < tx_count: # Create the new transaction and add it. tx = get_base_transaction() self.add_transactions_to_block(block, [tx]) # Now that we added a bunch of transactions, we need to recompute # the merkle root.
block.hashMerkleRoot = block.calc_merkle_root() if tx_count > 0: assert_equal(len(block.vtx), tx_count) # Do PoW, which is cheap on regnet block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block def set_tip(self, number: int): """ Move the tip back to a previous block. """ self.tip = self.blocks[number] def run_test(self): node = self.nodes[0] peer = node.add_p2p_connection(P2PDataStore()) self.genesis_hash = int(node.getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 spendable_outputs = [] # save the current tip so it can be spent by a later block def save_spendable_output(): spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(): return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) # update block state def update_block(block_number): block = self.blocks[block_number] old_sha256 = block.sha256 block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: self.block_heights[block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block # shorthand for functions block = self.next_block # Create a new block block(0) save_spendable_output() peer.send_blocks_and_test([self.tip], node) # Now we need that block to mature so we can spend the coinbase. maturity_blocks = [] for i in range(99): block(5000 + i) maturity_blocks.append(self.tip) save_spendable_output() peer.send_blocks_and_test(maturity_blocks, node) # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(100): out.append(get_spendable_output()) # Let's build some blocks and test them. for i in range(17): n = i + 1 peer.send_blocks_and_test([block(n)], node) peer.send_blocks_and_test([block(5556)], node) # Blocks with regular ordering are now rejected. peer.send_blocks_and_test([block( 5557, out[17], tx_count=16)], node, success=False, reject_reason='tx-ordering') # Rewind bad block. self.set_tip(5556) # After we activate the Nov 15, 2018 HF, transaction order is enforced. def ordered_block(block_number, spend): b = block(block_number, spend=spend, tx_count=16) make_conform_to_ctor(b) update_block(block_number) return b # Now that the fork has activated, we need to order transactions by txid. peer.send_blocks_and_test([ordered_block(4445, out[17])], node) peer.send_blocks_and_test([ordered_block(4446, out[18])], node) # Generate a block with a duplicated transaction. double_tx_block = ordered_block(4447, out[19]) assert_equal(len(double_tx_block.vtx), 16) double_tx_block.vtx = double_tx_block.vtx[:8] + \ [double_tx_block.vtx[8]] + double_tx_block.vtx[8:] update_block(4447) peer.send_blocks_and_test( [self.tip], node, success=False, reject_reason='bad-txns-duplicate') # Rewind bad block. self.set_tip(4446) # Check over two blocks.
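# The second block replays a transaction confirmed by the first one while its outputs are still unspent; the node must reject it with 'bad-txns-BIP30' (duplicate transaction).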
proper_block = ordered_block(4448, out[20]) peer.send_blocks_and_test([self.tip], node) replay_tx_block = ordered_block(4449, out[21]) assert_equal(len(replay_tx_block.vtx), 16) replay_tx_block.vtx.append(proper_block.vtx[5]) replay_tx_block.vtx = [replay_tx_block.vtx[0]] + \ sorted(replay_tx_block.vtx[1:], key=lambda tx: tx.get_id()) update_block(4449) peer.send_blocks_and_test( [self.tip], node, success=False, reject_reason='bad-txns-BIP30') if __name__ == '__main__': TransactionOrderingTest().main() diff --git a/test/functional/abc_p2p_compactblocks.py b/test/functional/abc_p2p_compactblocks.py index 854989d85..d67027b02 100755 --- a/test/functional/abc_p2p_compactblocks.py +++ b/test/functional/abc_p2p_compactblocks.py @@ -1,343 +1,343 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This test checks simple acceptance of bigger blocks via p2p. It is derived from the much more complex p2p-fullblocktest. The intention is that small tests can be derived from this one, or this one can be extended, to cover the checks done for bigger blocks (e.g. sigops limits). """ import random import time from collections import deque from test_framework.blocktools import ( create_block, create_coinbase, make_conform_to_ctor, ) from test_framework.cdefs import ONE_MEGABYTE from test_framework.messages import ( COutPoint, CTransaction, CTxIn, CTxOut, HeaderAndShortIDs, msg_cmpctblock, msg_sendcmpct, ser_compact_size, ) from test_framework.p2p import P2PDataStore, P2PInterface, p2p_lock from test_framework.script import OP_RETURN, OP_TRUE, CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal -class PreviousSpendableOutput(): +class PreviousSpendableOutput: def __init__(self, tx=CTransaction(), n=-1): self.tx = tx # the output we're spending self.n = n # TestP2PConn: A peer we use to send messages to bitcoind, and store responses. class TestP2PConn(P2PInterface): def __init__(self): self.last_sendcmpct = None self.last_cmpctblock = None self.last_getheaders = None self.last_headers = None super().__init__() def on_sendcmpct(self, message): self.last_sendcmpct = message def on_cmpctblock(self, message): self.last_cmpctblock = message self.last_cmpctblock.header_and_shortids.header.calc_sha256() def on_getheaders(self, message): self.last_getheaders = message def on_headers(self, message): self.last_headers = message for x in self.last_headers.headers: x.calc_sha256() def clear_block_data(self): with p2p_lock: self.last_sendcmpct = None self.last_cmpctblock = None class FullBlockTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.block_heights = {} self.tip = None self.blocks = {} self.excessive_block_size = 16 * ONE_MEGABYTE self.extra_args = [['-whitelist=noban@127.0.0.1', '-limitancestorcount=999999', '-limitancestorsize=999999', '-limitdescendantcount=999999', '-limitdescendantsize=999999', '-maxmempool=99999', '-excessiveblocksize={}'.format( self.excessive_block_size), '-acceptnonstdtxn=1']] # UBSAN will cause this test to timeout without this. 
self.rpc_timeout = 180 def add_options(self, parser): super().add_options(parser) parser.add_argument( "--runbarelyexpensive", dest="runbarelyexpensive", default=True) def add_transactions_to_block(self, block, tx_list): [tx.rehash() for tx in tx_list] block.vtx.extend(tx_list) def next_block(self, number, spend=None, script=CScript( [OP_TRUE]), block_size=0, extra_txns=0): if self.tip is None: base_block_hash = self.genesis_hash block_time = int(time.time()) + 1 else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height) coinbase.rehash() if spend is None: # We need to have something to spend to fill the block. assert_equal(block_size, 0) block = create_block(base_block_hash, coinbase, block_time) else: # all but one satoshi to fees coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) # Make sure we have plenty enough to spend going forward. spendable_outputs = deque([spend]) def get_base_transaction(): # Create the new transaction tx = CTransaction() # Spend from one of the spendable outputs spend = spendable_outputs.popleft() tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n))) # Add spendable outputs for i in range(4): tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) spendable_outputs.append(PreviousSpendableOutput(tx, i)) pad_tx(tx) return tx tx = get_base_transaction() # Make it the same format as transaction added for padding and save the size. # It's missing the padding output, so we add a constant to account # for it. tx.rehash() # If a specific script is required, add it. if script is not None: tx.vout.append(CTxOut(1, script)) # Put some random data into the first transaction of the chain to # randomize ids. tx.vout.append( CTxOut(0, CScript([random.randint(0, 256), OP_RETURN]))) # Add the transaction to the block self.add_transactions_to_block(block, [tx]) # Add transaction until we reach the expected transaction count for _ in range(extra_txns): self.add_transactions_to_block(block, [get_base_transaction()]) # If we have a block size requirement, just fill # the block until we get there current_block_size = len(block.serialize()) overage_bytes = 0 while current_block_size < block_size: # We will add a new transaction. That means the size of # the field enumerating how many transaction go in the block # may change. current_block_size -= len(ser_compact_size(len(block.vtx))) current_block_size += len(ser_compact_size(len(block.vtx) + 1)) # Add padding to fill the block. left_to_fill = block_size - current_block_size # Don't go over the 1 mb limit for a txn if left_to_fill > 500000: # Make sure we eat up non-divisible by 100 amounts quickly # Also keep transaction less than 1 MB left_to_fill = 500000 + left_to_fill % 100 # Create the new transaction tx = get_base_transaction() pad_tx(tx, left_to_fill - overage_bytes) if len(tx.serialize()) + current_block_size > block_size: # Our padding was too big try again overage_bytes += 1 continue # Add the tx to the list of transactions to be included # in the block. self.add_transactions_to_block(block, [tx]) current_block_size += len(tx.serialize()) # Now that we added a bunch of transaction, we need to recompute # the merkle root. 
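# make_conform_to_ctor (imported from test_framework.blocktools above) sorts the non-coinbase transactions by txid so the block satisfies canonical transaction ordering before the merkle root is recomputed.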
make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() # Check that the block size is what's expected if block_size > 0: assert_equal(len(block.serialize()), block_size) # Do PoW, which is cheap on regnet block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block def run_test(self): node = self.nodes[0] default_p2p = node.add_p2p_connection(P2PDataStore()) test_p2p = node.add_p2p_connection(TestP2PConn()) self.genesis_hash = int(node.getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 spendable_outputs = [] # save the current tip so it can be spent by a later block def save_spendable_output(): spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(): return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) # shorthand for functions block = self.next_block # Create a new block block(0) save_spendable_output() default_p2p.send_blocks_and_test([self.tip], node) # Now we need that block to mature so we can spend the coinbase. maturity_blocks = [] for i in range(99): block(5000 + i) maturity_blocks.append(self.tip) save_spendable_output() # Get to one block of the May 15, 2018 HF activation for i in range(6): block(5100 + i) maturity_blocks.append(self.tip) # Send it all to the node at once. default_p2p.send_blocks_and_test(maturity_blocks, node) # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(100): out.append(get_spendable_output()) # Check that compact block also work for big blocks # Wait for SENDCMPCT def received_sendcmpct(): return (test_p2p.last_sendcmpct is not None) self.wait_until(received_sendcmpct, timeout=30) test_p2p.send_and_ping(msg_sendcmpct(announce=True, version=1)) # Exchange headers def received_getheaders(): return (test_p2p.last_getheaders is not None) self.wait_until(received_getheaders, timeout=30) # Return the favor test_p2p.send_message(test_p2p.last_getheaders) # Wait for the header list def received_headers(): return (test_p2p.last_headers is not None) self.wait_until(received_headers, timeout=30) # It's like we know about the same headers ! test_p2p.send_message(test_p2p.last_headers) # Send a block b1 = block(1, spend=out[0], block_size=ONE_MEGABYTE + 1) default_p2p.send_blocks_and_test([self.tip], node) # Checks the node to forward it via compact block def received_block(): return (test_p2p.last_cmpctblock is not None) self.wait_until(received_block, timeout=30) # Was it our block ? cmpctblk_header = test_p2p.last_cmpctblock.header_and_shortids.header cmpctblk_header.calc_sha256() assert cmpctblk_header.sha256 == b1.sha256 # Send a large block with numerous transactions. test_p2p.clear_block_data() b2 = block(2, spend=out[1], extra_txns=70000, block_size=self.excessive_block_size - 1000) default_p2p.send_blocks_and_test([self.tip], node) # Checks the node forwards it via compact block self.wait_until(received_block, timeout=30) # Was it our block ? cmpctblk_header = test_p2p.last_cmpctblock.header_and_shortids.header cmpctblk_header.calc_sha256() assert cmpctblk_header.sha256 == b2.sha256 # In order to avoid having to resend a ton of transactions, we invalidate # b2, which will send all its transactions in the mempool. Note that this # assumes reorgs will insert low-fee transactions back into the # mempool. node.invalidateblock(node.getbestblockhash()) # Let's send a compact block and see if the node accepts it. 
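# A compact block announces the header plus short transaction ids only; the peer reconstructs the full block from transactions already in its mempool, which is why b2's transactions were put back into the mempool above.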
# Let's modify b2 and use it so that we can reuse the mempool. tx = b2.vtx[0] tx.vout.append(CTxOut(0, CScript([random.randint(0, 256), OP_RETURN]))) tx.rehash() b2.vtx[0] = tx b2.hashMerkleRoot = b2.calc_merkle_root() b2.solve() # Now we create the compact block and send it comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(b2) test_p2p.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) # Check that the compact block is received properly assert int(node.getbestblockhash(), 16) == b2.sha256 if __name__ == '__main__': FullBlockTest().main() diff --git a/test/functional/abc_p2p_fullblocktest.py b/test/functional/abc_p2p_fullblocktest.py index 258ffce41..1ce9e263d 100755 --- a/test/functional/abc_p2p_fullblocktest.py +++ b/test/functional/abc_p2p_fullblocktest.py @@ -1,256 +1,256 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This test checks simple acceptance of bigger blocks via p2p. It is derived from the much more complex p2p-fullblocktest. The intention is that small tests can be derived from this one, or this one can be extended, to cover the checks done for bigger blocks (e.g. sigops limits). """ import random import time from collections import deque from test_framework.blocktools import ( create_block, create_coinbase, make_conform_to_ctor, ) from test_framework.cdefs import ONE_MEGABYTE from test_framework.messages import ( COutPoint, CTransaction, CTxIn, CTxOut, ToHex, ser_compact_size, ) from test_framework.p2p import P2PDataStore from test_framework.script import OP_RETURN, OP_TRUE, CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal -class PreviousSpendableOutput(): +class PreviousSpendableOutput: def __init__(self, tx=CTransaction(), n=-1): self.tx = tx # the output we're spending self.n = n class FullBlockTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.block_heights = {} self.tip = None self.blocks = {} self.excessive_block_size = 100 * ONE_MEGABYTE self.extra_args = [['-whitelist=noban@127.0.0.1', "-excessiveblocksize={}".format(self.excessive_block_size)]] self.supports_cli = False # The default timeout is not enough when submitting large blocks with # TSAN enabled self.rpc_timeout = 360 def add_options(self, parser): super().add_options(parser) parser.add_argument( "--runbarelyexpensive", dest="runbarelyexpensive", default=True) def add_transactions_to_block(self, block, tx_list): [tx.rehash() for tx in tx_list] block.vtx.extend(tx_list) def next_block(self, number, spend=None, script=CScript([OP_TRUE]), block_size=0): if self.tip is None: base_block_hash = self.genesis_hash block_time = int(time.time()) + 1 else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height) coinbase.rehash() if spend is None: # We need to have something to spend to fill the block. assert_equal(block_size, 0) block = create_block(base_block_hash, coinbase, block_time) else: # all but one satoshi to fees coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) # Make sure we have plenty enough to spend going forward.
spendable_outputs = deque([spend]) def get_base_transaction(): # Create the new transaction tx = CTransaction() # Spend from one of the spendable outputs spend = spendable_outputs.popleft() tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n))) # Add spendable outputs for i in range(4): tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) spendable_outputs.append(PreviousSpendableOutput(tx, i)) return tx tx = get_base_transaction() # Make it the same format as transaction added for padding and save the size. # It's missing the padding output, so we add a constant to account # for it. tx.rehash() base_tx_size = len(tx.serialize()) + 18 # If a specific script is required, add it. if script is not None: tx.vout.append(CTxOut(1, script)) # Put some random data into the first transaction of the chain to # randomize ids. tx.vout.append( CTxOut(0, CScript([random.randint(0, 256), OP_RETURN]))) # Add the transaction to the block self.add_transactions_to_block(block, [tx]) # If we have a block size requirement, just fill # the block until we get there current_block_size = len(block.serialize()) while current_block_size < block_size: # We will add a new transaction. That means the size of # the field enumerating how many transaction go in the block # may change. current_block_size -= len(ser_compact_size(len(block.vtx))) current_block_size += len(ser_compact_size(len(block.vtx) + 1)) # Create the new transaction tx = get_base_transaction() # Add padding to fill the block. script_length = block_size - current_block_size - base_tx_size if script_length > 510000: if script_length < 1000000: # Make sure we don't find ourselves in a position where we # need to generate a transaction smaller than what we # expected. script_length = script_length // 2 else: script_length = 500000 script_pad_len = script_length script_output = CScript([b'\x00' * script_pad_len]) tx.vout.append(CTxOut(0, script_output)) # Add the tx to the list of transactions to be included # in the block. self.add_transactions_to_block(block, [tx]) current_block_size += len(tx.serialize()) # Now that we added a bunch of transaction, we need to recompute # the merkle root. 
make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() # Check that the block size is what's expected if block_size > 0: assert_equal(len(block.serialize()), block_size) # Do PoW, which is cheap on regnet block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block def run_test(self): node = self.nodes[0] peer = node.add_p2p_connection(P2PDataStore()) self.genesis_hash = int(node.getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 spendable_outputs = [] # save the current tip so it can be spent by a later block def save_spendable_output(): spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(): return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) # adds transactions to the block and updates state def update_block(block_number, new_transactions): block = self.blocks[block_number] self.add_transactions_to_block(block, new_transactions) old_sha256 = block.sha256 make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: self.block_heights[ block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block # shorthand for functions block = self.next_block # Create a new block block(0) save_spendable_output() peer.send_blocks_and_test([self.tip], node) # Now we need that block to mature so we can spend the coinbase. maturity_blocks = [] for i in range(99): block(5000 + i) maturity_blocks.append(self.tip) save_spendable_output() peer.send_blocks_and_test(maturity_blocks, node) # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(100): out.append(get_spendable_output()) # Let's build some blocks and test them. for i in range(16): n = i + 1 block(n, spend=out[i], block_size=n * ONE_MEGABYTE) peer.send_blocks_and_test([self.tip], node) # block of maximal size block(17, spend=out[16], block_size=self.excessive_block_size) peer.send_blocks_and_test([self.tip], node) # Reject oversized blocks with bad-blk-length error block(18, spend=out[17], block_size=self.excessive_block_size + 1) peer.send_blocks_and_test( [self.tip], node, success=False, reject_reason='bad-blk-length', timeout=360) # Rewind bad block. self.tip = self.blocks[17] # Submit a very large block via RPC large_block = block( 33, spend=out[17], block_size=self.excessive_block_size) assert_equal(node.submitblock(ToHex(large_block)), None) if __name__ == '__main__': FullBlockTest().main() diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index 03304617d..02e259d4e 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -1,231 +1,231 @@ #!/usr/bin/env python3 # Copyright (c) 2011 Jeff Garzik # # Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: # # Copyright (c) 2007 Jan-Klaas Kollhof # # This file is part of jsonrpc. # # jsonrpc is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. 
# # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """HTTP proxy for opening RPC connection to bitcoind. AuthServiceProxy has the following improvements over python-jsonrpc's ServiceProxy class: - HTTP connections persist for the life of the AuthServiceProxy object (if server supports HTTP/1.1) - sends protocol 'version', per JSON-RPC 1.1 - sends proper, incrementing 'id' - sends Basic HTTP authentication headers - parses all JSON numbers that look like floats as Decimal - uses standard Python json lib """ import base64 import decimal import http.client import json import logging import os import socket import time import urllib.parse from http import HTTPStatus HTTP_TIMEOUT = 30 USER_AGENT = "AuthServiceProxy/0.1" log = logging.getLogger("BitcoinRPC") class JSONRPCException(Exception): def __init__(self, rpc_error, http_status=None): try: errmsg = '{} ({})'.format(rpc_error['message'], rpc_error['code']) except (KeyError, TypeError): errmsg = '' super().__init__(errmsg) self.error = rpc_error self.http_status = http_status def EncodeDecimal(o): if isinstance(o, decimal.Decimal): return str(o) raise TypeError(repr(o) + " is not JSON serializable") -class AuthServiceProxy(): +class AuthServiceProxy: __id_count = 0 # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): self.__service_url = service_url self._service_name = service_name self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests self.__url = urllib.parse.urlparse(service_url) user = None if self.__url.username is None else self.__url.username.encode( 'utf8') passwd = None if self.__url.password is None else self.__url.password.encode( 'utf8') authpair = user + b':' + passwd self.__auth_header = b'Basic ' + base64.b64encode(authpair) self.timeout = timeout self._set_conn(connection) def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): # Python internal stuff raise AttributeError if self._service_name is not None: name = "{}.{}".format(self._service_name, name) return AuthServiceProxy( self.__service_url, name, connection=self.__conn) def _request(self, method, path, postdata): ''' Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. 
''' headers = {'Host': self.__url.hostname, 'User-Agent': USER_AGENT, 'Authorization': self.__auth_header, 'Content-type': 'application/json'} if os.name == 'nt': # Windows somehow does not like to re-use connections # TODO: Find out why the connection would disconnect occasionally # and make it reusable on Windows # Avoid "ConnectionAbortedError: [WinError 10053] An established # connection was aborted by the software in your host machine" self._set_conn() try: self.__conn.request(method, path, postdata, headers) return self._get_response() except (BrokenPipeError, ConnectionResetError): # Python 3.5+ raises BrokenPipeError when the connection was reset # ConnectionResetError happens on FreeBSD self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() except OSError as e: retry = ( '[WinError 10053] An established connection was aborted by the software in your host machine' in str(e)) # Workaround for a bug on macOS. See # https://bugs.python.org/issue33450 retry = retry or ( '[Errno 41] Protocol wrong type for socket' in str(e)) if retry: self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() else: raise def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 log.debug("-{}-> {} {}".format( AuthServiceProxy.__id_count, self._service_name, json.dumps( args or argsn, default=EncodeDecimal, ensure_ascii=self.ensure_ascii), )) if args and argsn: raise ValueError( 'Cannot handle both named and positional arguments') return {'version': '1.1', 'method': self._service_name, 'params': args or argsn, 'id': AuthServiceProxy.__id_count} def __call__(self, *args, **argsn): postdata = json.dumps(self.get_request( *args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) response, status = self._request( 'POST', self.__url.path, postdata.encode('utf-8')) if response['error'] is not None: raise JSONRPCException(response['error'], status) elif 'result' not in response: raise JSONRPCException({ 'code': -343, 'message': 'missing JSON-RPC result'}, status) elif status != HTTPStatus.OK: raise JSONRPCException({ 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status) else: return response['result'] def batch(self, rpc_call_list): postdata = json.dumps( list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) log.debug("--> " + postdata) response, status = self._request( 'POST', self.__url.path, postdata.encode('utf-8')) if status != HTTPStatus.OK: raise JSONRPCException({ 'code': -342, 'message': 'non-200 HTTP status code but no JSON-RPC error'}, status) return response def _get_response(self): req_start_time = time.time() try: http_response = self.__conn.getresponse() except socket.timeout: raise JSONRPCException({ 'code': -344, 'message': '{!r} RPC took longer than {} seconds. 
Consider ' 'using larger timeout for calls that take ' 'longer to return.'.format(self._service_name, self.__conn.timeout)}) if http_response is None: raise JSONRPCException({ 'code': -342, 'message': 'missing HTTP response from server'}) content_type = http_response.getheader('Content-Type') if content_type != 'application/json': raise JSONRPCException( {'code': -342, 'message': 'non-JSON HTTP response with \'{} {}\' from server'.format( http_response.status, http_response.reason)}, http_response.status) responsedata = http_response.read().decode('utf8') response = json.loads(responsedata, parse_float=decimal.Decimal) elapsed = time.time() - req_start_time if "error" in response and response["error"] is None: log.debug("<-{}- [{:.6f}] {}".format(response["id"], elapsed, json.dumps( response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) else: log.debug("<-- [{:.6f}] {}".format(elapsed, responsedata)) return response, http_response.status def __truediv__(self, relative_uri): return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn) def _set_conn(self, connection=None): port = 80 if self.__url.port is None else self.__url.port if connection: self.__conn = connection self.timeout = connection.timeout elif self.__url.scheme == 'https': self.__conn = http.client.HTTPSConnection( self.__url.hostname, port, timeout=self.timeout) else: self.__conn = http.client.HTTPConnection( self.__url.hostname, port, timeout=self.timeout) diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py index c9b2238d0..d57facee3 100644 --- a/test/functional/test_framework/coverage.py +++ b/test/functional/test_framework/coverage.py @@ -1,110 +1,110 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for doing coverage analysis on the RPC interface. Provides a way to track which RPC commands are exercised during testing. """ import os REFERENCE_FILENAME = 'rpc_interface.txt' -class AuthServiceProxyWrapper(): +class AuthServiceProxyWrapper: """ An object that wraps AuthServiceProxy to record specific RPC calls. """ def __init__(self, auth_service_proxy_instance, coverage_logfile=None): """ Kwargs: auth_service_proxy_instance (AuthServiceProxy): the instance being wrapped. coverage_logfile (str): if specified, write each service_name out to a file when called. """ self.auth_service_proxy_instance = auth_service_proxy_instance self.coverage_logfile = coverage_logfile def __getattr__(self, name): return_val = getattr(self.auth_service_proxy_instance, name) if not isinstance(return_val, type(self.auth_service_proxy_instance)): # If proxy getattr returned an unwrapped value, do the same here. return return_val return AuthServiceProxyWrapper(return_val, self.coverage_logfile) def __call__(self, *args, **kwargs): """ Delegates to AuthServiceProxy, then writes the particular RPC method called to a file. 
""" return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs) self._log_call() return return_val def _log_call(self): rpc_method = self.auth_service_proxy_instance._service_name if self.coverage_logfile: with open(self.coverage_logfile, 'a+', encoding='utf8') as f: f.write("{}\n".format(rpc_method)) def __truediv__(self, relative_uri): return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri, self.coverage_logfile) def get_request(self, *args, **kwargs): self._log_call() return self.auth_service_proxy_instance.get_request(*args, **kwargs) def get_filename(dirname, n_node): """ Get a filename unique to the test process ID and node. This file will contain a list of RPC commands covered. """ pid = str(os.getpid()) return os.path.join( dirname, "coverage.pid{}.node{}.txt".format(pid, str(n_node))) def write_all_rpc_commands(dirname, node): """ Write out a list of all RPC functions available in `bitcoin-cli` for coverage comparison. This will only happen once per coverage directory. Args: dirname (str): temporary test dir node (AuthServiceProxy): client Returns: bool. if the RPC interface file was written. """ filename = os.path.join(dirname, REFERENCE_FILENAME) if os.path.isfile(filename): return False help_output = node.help().split('\n') commands = set() for line in help_output: line = line.strip() # Ignore blanks and headers if line and not line.startswith('='): commands.add("{}\n".format(line.split()[0])) with open(filename, 'w', encoding='utf8') as f: f.writelines(list(commands)) return True diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index d86dcf387..ebd8ba5af 100755 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -1,407 +1,407 @@ #!/usr/bin/env python3 # Copyright (c) 2019 Pieter Wuille # Copyright (c) 2019-2020 The Bitcoin developers """Test-only secp256k1 elliptic curve implementation WARNING: This code is slow, uses bad randomness, does not properly protect keys, and is trivially vulnerable to side channel attacks. Do not use for anything but tests. """ import hashlib import random from .util import modinv def jacobi_symbol(n, k): """Compute the Jacobi symbol of n modulo k See http://en.wikipedia.org/wiki/Jacobi_symbol """ assert k > 0 and k & 1 n %= k t = 0 while n != 0: while n & 1 == 0: n >>= 1 r = k & 7 t ^= (r == 3 or r == 5) n, k = k, n t ^= (n & k & 3 == 3) n = n % k if k == 1: return -1 if t else 1 return 0 def modsqrt(a, p): """Compute the square root of a modulo p For p = 3 mod 4, if a square root exists, it is equal to a**((p+1)/4) mod p. 
""" assert(p % 4 == 3) # Only p = 3 mod 4 is implemented sqrt = pow(a, (p + 1) // 4, p) if pow(sqrt, 2, p) == a % p: return sqrt return None class EllipticCurve: def __init__(self, p, a, b): """Initialize elliptic curve y^2 = x^3 + a*x + b over GF(p).""" self.p = p self.a = a % p self.b = b % p def affine(self, p1): """Convert a Jacobian point tuple p1 to affine form, or None if at infinity.""" x1, y1, z1 = p1 if z1 == 0: return None inv = modinv(z1, self.p) inv_2 = (inv**2) % self.p inv_3 = (inv_2 * inv) % self.p return ((inv_2 * x1) % self.p, (inv_3 * y1) % self.p, 1) def negate(self, p1): """Negate a Jacobian point tuple p1.""" x1, y1, z1 = p1 return (x1, (self.p - y1) % self.p, z1) def on_curve(self, p1): """Determine whether a Jacobian tuple p is on the curve (and not infinity)""" x1, y1, z1 = p1 z2 = pow(z1, 2, self.p) z4 = pow(z2, 2, self.p) return z1 != 0 and (pow(x1, 3, self.p) + self.a * x1 * z4 + self.b * z2 * z4 - pow(y1, 2, self.p)) % self.p == 0 def is_x_coord(self, x): """Test whether x is a valid X coordinate on the curve.""" x_3 = pow(x, 3, self.p) return jacobi_symbol(x_3 + self.a * x + self.b, self.p) != -1 def lift_x(self, x): """Given an X coordinate on the curve, return a corresponding affine point.""" x_3 = pow(x, 3, self.p) v = x_3 + self.a * x + self.b y = modsqrt(v, self.p) if y is None: return None return (x, y, 1) def double(self, p1): """Double a Jacobian tuple p1""" x1, y1, z1 = p1 if z1 == 0: return (0, 1, 0) y1_2 = (y1**2) % self.p y1_4 = (y1_2**2) % self.p x1_2 = (x1**2) % self.p s = (4 * x1 * y1_2) % self.p m = 3 * x1_2 if self.a: m += self.a * pow(z1, 4, self.p) m = m % self.p x2 = (m**2 - 2 * s) % self.p y2 = (m * (s - x2) - 8 * y1_4) % self.p z2 = (2 * y1 * z1) % self.p return (x2, y2, z2) def add_mixed(self, p1, p2): """Add a Jacobian tuple p1 and an affine tuple p2""" x1, y1, z1 = p1 x2, y2, z2 = p2 assert(z2 == 1) if z1 == 0: return p2 z1_2 = (z1**2) % self.p z1_3 = (z1_2 * z1) % self.p u2 = (x2 * z1_2) % self.p s2 = (y2 * z1_3) % self.p if x1 == u2: if (y1 != s2): return (0, 1, 0) return self.double(p1) h = u2 - x1 r = s2 - y1 h_2 = (h**2) % self.p h_3 = (h_2 * h) % self.p u1_h_2 = (x1 * h_2) % self.p x3 = (r**2 - h_3 - 2 * u1_h_2) % self.p y3 = (r * (u1_h_2 - x3) - y1 * h_3) % self.p z3 = (h * z1) % self.p return (x3, y3, z3) def add(self, p1, p2): """Add two Jacobian tuples p1 and p2""" x1, y1, z1 = p1 x2, y2, z2 = p2 if z1 == 0: return p2 if z2 == 0: return p1 if z1 == 1: return self.add_mixed(p2, p1) if z2 == 1: return self.add_mixed(p1, p2) z1_2 = (z1**2) % self.p z1_3 = (z1_2 * z1) % self.p z2_2 = (z2**2) % self.p z2_3 = (z2_2 * z2) % self.p u1 = (x1 * z2_2) % self.p u2 = (x2 * z1_2) % self.p s1 = (y1 * z2_3) % self.p s2 = (y2 * z1_3) % self.p if u1 == u2: if (s1 != s2): return (0, 1, 0) return self.double(p1) h = u2 - u1 r = s2 - s1 h_2 = (h**2) % self.p h_3 = (h_2 * h) % self.p u1_h_2 = (u1 * h_2) % self.p x3 = (r**2 - h_3 - 2 * u1_h_2) % self.p y3 = (r * (u1_h_2 - x3) - s1 * h_3) % self.p z3 = (h * z1 * z2) % self.p return (x3, y3, z3) def mul(self, ps): """Compute a (multi) point multiplication ps is a list of (Jacobian tuple, scalar) pairs. 
""" r = (0, 1, 0) for i in range(255, -1, -1): r = self.double(r) for (p, n) in ps: if ((n >> i) & 1): r = self.add(r, p) return r SECP256K1 = EllipticCurve(2**256 - 2**32 - 977, 0, 7) SECP256K1_G = ( 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798, 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8, 1) SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2 -class ECPubKey(): +class ECPubKey: """A secp256k1 public key""" def __init__(self): """Construct an uninitialized public key""" self.valid = False def set(self, data): """Construct a public key from a serialization in compressed or uncompressed format""" if (len(data) == 65 and data[0] == 0x04): p = (int.from_bytes(data[1:33], 'big'), int.from_bytes(data[33:65], 'big'), 1) self.valid = SECP256K1.on_curve(p) if self.valid: self.p = p self.compressed = False elif (len(data) == 33 and (data[0] == 0x02 or data[0] == 0x03)): x = int.from_bytes(data[1:33], 'big') if SECP256K1.is_x_coord(x): p = SECP256K1.lift_x(x) if (p[1] & 1) != (data[0] & 1): p = SECP256K1.negate(p) self.p = p self.valid = True self.compressed = True else: self.valid = False else: self.valid = False @property def is_compressed(self): return self.compressed @property def is_valid(self): return self.valid def get_bytes(self): assert(self.valid) p = SECP256K1.affine(self.p) if p is None: return None if self.compressed: return bytes([0x02 + (p[1] & 1)]) + p[0].to_bytes(32, 'big') else: return bytes([0x04]) + p[0].to_bytes(32, 'big') + \ p[1].to_bytes(32, 'big') def verify_ecdsa(self, sig, msg, low_s=True): """Verify a strictly DER-encoded ECDSA signature against this pubkey.""" assert(self.valid) if (sig[1] + 2 != len(sig)): return False if (len(sig) < 4): return False if (sig[0] != 0x30): return False if (sig[2] != 0x02): return False rlen = sig[3] if (len(sig) < 6 + rlen): return False if rlen < 1 or rlen > 33: return False if sig[4] >= 0x80: return False if (rlen > 1 and (sig[4] == 0) and not (sig[5] & 0x80)): return False r = int.from_bytes(sig[4:4 + rlen], 'big') if (sig[4 + rlen] != 0x02): return False slen = sig[5 + rlen] if slen < 1 or slen > 33: return False if (len(sig) != 6 + rlen + slen): return False if sig[6 + rlen] >= 0x80: return False if (slen > 1 and (sig[6 + rlen] == 0) and not (sig[7 + rlen] & 0x80)): return False s = int.from_bytes(sig[6 + rlen:6 + rlen + slen], 'big') if r < 1 or s < 1 or r >= SECP256K1_ORDER or s >= SECP256K1_ORDER: return False if low_s and s >= SECP256K1_ORDER_HALF: return False z = int.from_bytes(msg, 'big') w = modinv(s, SECP256K1_ORDER) u1 = z * w % SECP256K1_ORDER u2 = r * w % SECP256K1_ORDER R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, u1), (self.p, u2)])) if R is None or R[0] != r: return False return True def verify_schnorr(self, sig, msg32): assert self.is_valid assert len(sig) == 64 assert len(msg32) == 32 Rx = sig[:32] s = int.from_bytes(sig[32:], 'big') e = int.from_bytes( hashlib.sha256( Rx + self.get_bytes() + msg32).digest(), 'big') nege = SECP256K1_ORDER - e R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, s), (self.p, nege)])) if R is None: return False if jacobi_symbol(R[1], SECP256K1.p) == -1: return False return R[0] == int.from_bytes(Rx, 'big') -class ECKey(): +class ECKey: """A secp256k1 private key""" def __init__(self): self.valid = False def set(self, secret, compressed): """Construct a private key object with given 32-byte secret and compressed flag.""" assert(len(secret) == 32) secret = 
int.from_bytes(secret, 'big') self.valid = (secret > 0 and secret < SECP256K1_ORDER) if self.valid: self.secret = secret self.compressed = compressed def generate(self, compressed=True): """Generate a random private key (compressed or uncompressed).""" self.set( random.randrange( 1, SECP256K1_ORDER).to_bytes( 32, 'big'), compressed) def get_bytes(self): """Retrieve the 32-byte representation of this key.""" assert(self.valid) return self.secret.to_bytes(32, 'big') @property def is_valid(self): return self.valid @property def is_compressed(self): return self.compressed def get_pubkey(self): """Compute an ECPubKey object for this secret key.""" assert(self.valid) ret = ECPubKey() p = SECP256K1.mul([(SECP256K1_G, self.secret)]) ret.p = p ret.valid = True ret.compressed = self.compressed return ret def sign_ecdsa(self, msg, low_s=True): """Construct a DER-encoded ECDSA signature with this key.""" assert(self.valid) z = int.from_bytes(msg, 'big') # Note: no RFC6979, but a simple random nonce (some tests rely on # distinct transactions for the same operation) k = random.randrange(1, SECP256K1_ORDER) R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)])) r = R[0] % SECP256K1_ORDER s = (modinv(k, SECP256K1_ORDER) * (z + self.secret * r)) % SECP256K1_ORDER if low_s and s > SECP256K1_ORDER_HALF: s = SECP256K1_ORDER - s rb = r.to_bytes((r.bit_length() + 8) // 8, 'big') sb = s.to_bytes((s.bit_length() + 8) // 8, 'big') return b'\x30' + \ bytes([4 + len(rb) + len(sb), 2, len(rb)]) + \ rb + bytes([2, len(sb)]) + sb def sign_schnorr(self, msg32): """Create Schnorr signature (BIP-Schnorr convention).""" assert self.valid assert len(msg32) == 32 pubkey = self.get_pubkey() assert pubkey.is_valid k = random.randrange(1, SECP256K1_ORDER) R = SECP256K1.affine(SECP256K1.mul([(SECP256K1_G, k)])) if jacobi_symbol(R[1], SECP256K1.p) == -1: k = SECP256K1_ORDER - k Rx = R[0].to_bytes(32, 'big') e = int.from_bytes( hashlib.sha256( Rx + pubkey.get_bytes() + msg32).digest(), 'big') s = (k + e * int.from_bytes(self.get_bytes(), 'big')) % SECP256K1_ORDER sig = Rx + s.to_bytes(32, 'big') assert pubkey.verify_schnorr(sig, msg32) return sig diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index d27c39341..5d8eaeeb0 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -1,2223 +1,2223 @@ #!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Bitcoin test framework primitive and message structures CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: data structures that should map to corresponding structures in bitcoin/primitives msg_block, msg_tx, msg_headers, etc.: data structures that represent network messages ser_*, deser_*: functions that handle serialization/deserialization. Classes use __slots__ to ensure extraneous attributes aren't accidentally added by tests, compromising their intended effect. 
""" import copy import hashlib import random import socket import struct import time import unittest from base64 import b64decode, b64encode from codecs import encode from enum import IntEnum from io import BytesIO from typing import List from test_framework.siphash import siphash256 from test_framework.util import assert_equal, hex_str_to_bytes MAX_LOCATOR_SZ = 101 MAX_BLOCK_BASE_SIZE = 1000000 MAX_BLOOM_FILTER_SIZE = 36000 MAX_BLOOM_HASH_FUNCS = 50 # 1,000,000 XEC in satoshis (legacy BCHA) COIN = 100000000 # 1 XEC in satoshis XEC = 100 MAX_MONEY = 21000000 * COIN # Maximum length of incoming protocol messages MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024 MAX_HEADERS_RESULTS = 2000 # Number of headers sent in one getheaders result MAX_INV_SIZE = 50000 # Maximum number of entries in an 'inv' protocol message NODE_NETWORK = (1 << 0) NODE_GETUTXO = (1 << 1) NODE_BLOOM = (1 << 2) # NODE_WITNESS = (1 << 3) # NODE_XTHIN = (1 << 4) # removed in v0.22.12 NODE_COMPACT_FILTERS = (1 << 6) NODE_NETWORK_LIMITED = (1 << 10) NODE_AVALANCHE = (1 << 24) MSG_TX = 1 MSG_BLOCK = 2 MSG_FILTERED_BLOCK = 3 MSG_CMPCT_BLOCK = 4 MSG_AVA_PROOF = 0x1f000001 MSG_TYPE_MASK = 0xffffffff >> 2 FILTER_TYPE_BASIC = 0 # Serialization/deserialization tools def sha256(s): return hashlib.new('sha256', s).digest() def hash256(s): return sha256(sha256(s)) def ser_compact_size(size): r = b"" if size < 253: r = struct.pack("B", size) elif size < 0x10000: r = struct.pack(">= 32 return rs def uint256_from_str(s): r = 0 t = struct.unpack("> 24) & 0xFF v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) return v # deser_function_name: Allow for an alternate deserialization function on the # entries in the vector. def deser_vector(f, c, deser_function_name=None): nit = deser_compact_size(f) r = [] for _ in range(nit): t = c() if deser_function_name: getattr(t, deser_function_name)(f) else: t.deserialize(f) r.append(t) return r # ser_function_name: Allow for an alternate serialization function on the # entries in the vector. 
def ser_vector(v, ser_function_name=None): r = ser_compact_size(len(v)) for i in v: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for _ in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(v): r = ser_compact_size(len(v)) for i in v: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for _ in range(nit): t = deser_string(f) r.append(t) return r def ser_string_vector(v): r = ser_compact_size(len(v)) for sv in v: r += ser_string(sv) return r def FromHex(obj, hex_string): """Deserialize from a hex string representation (eg from RPC)""" obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) return obj def ToHex(obj): """Convert a binary-serializable object to hex (eg for submission via RPC)""" return obj.serialize().hex() # Objects that map to bitcoind objects, which can be serialized/deserialized class CAddress: __slots__ = ("net", "ip", "nServices", "port", "time") # see https://github.com/bitcoin/bips/blob/master/bip-0155.mediawiki NET_IPV4 = 1 ADDRV2_NET_NAME = { NET_IPV4: "IPv4" } ADDRV2_ADDRESS_LENGTH = { NET_IPV4: 4 } def __init__(self): self.time = 0 self.nServices = 1 self.net = self.NET_IPV4 self.ip = "0.0.0.0" self.port = 0 def deserialize(self, f, *, with_time=True): """Deserialize from addrv1 format (pre-BIP155)""" if with_time: # VERSION messages serialize CAddress objects without time self.time = struct.unpack("H", f.read(2))[0] def serialize(self, *, with_time=True): """Serialize in addrv1 format (pre-BIP155)""" assert self.net == self.NET_IPV4 r = b"" if with_time: # VERSION messages serialize CAddress objects without time r += struct.pack("H", self.port) return r def deserialize_v2(self, f): """Deserialize from addrv2 format (BIP155)""" self.time = struct.unpack("H", f.read(2))[0] def serialize_v2(self): """Serialize in addrv2 format (BIP155)""" assert self.net == self.NET_IPV4 r = b"" r += struct.pack("H", self.port) return r def __repr__(self): return ("CAddress(nServices=%i net=%s addr=%s port=%i)" % (self.nServices, self.ADDRV2_NET_NAME[self.net], self.ip, self.port)) class CInv: __slots__ = ("hash", "type") typemap = { 0: "Error", MSG_TX: "TX", MSG_BLOCK: "Block", MSG_FILTERED_BLOCK: "filtered Block", MSG_CMPCT_BLOCK: "CompactBlock", MSG_AVA_PROOF: "avalanche proof", } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack(" MAX_MONEY: return False return True def __repr__(self): return "CTransaction(nVersion={} vin={} vout={} nLockTime={})".format( self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) class CBlockHeader: __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce", "nTime", "nVersion", "sha256") def __init__(self, header=None): if header is None: self.set_null() else: self.nVersion = header.nVersion self.hashPrevBlock = header.hashPrevBlock self.hashMerkleRoot = header.hashMerkleRoot self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce self.sha256 = header.sha256 self.hash = header.hash self.calc_sha256() def set_null(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack(" 1: newhashes = [] for i in range(0, len(hashes), 2): i2 = min(i + 1, len(hashes) - 1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = 
newhashes return uint256_from_str(hashes[0]) def calc_merkle_root(self): hashes = [] for tx in self.vtx: tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) return self.get_merkle_root(hashes) def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.sha256 > target: return False for tx in self.vtx: if not tx.is_valid(): return False if self.calc_merkle_root() != self.hashMerkleRoot: return False return True def solve(self): self.rehash() target = uint256_from_compact(self.nBits) while self.sha256 > target: self.nNonce += 1 self.rehash() def __repr__(self): return "CBlock(nVersion={} hashPrevBlock={:064x} hashMerkleRoot={:064x} nTime={} nBits={:08x} nNonce={:08x} vtx={})".format( self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.nTime, self.nBits, self.nNonce, repr(self.vtx)) class PrefilledTransaction: __slots__ = ("index", "tx") def __init__(self, index=0, tx=None): self.index = index self.tx = tx def deserialize(self, f): self.index = deser_compact_size(f) self.tx = CTransaction() self.tx.deserialize(f) def serialize(self): r = b"" r += ser_compact_size(self.index) r += self.tx.serialize() return r def __repr__(self): return "PrefilledTransaction(index={}, tx={})".format( self.index, repr(self.tx)) # This is what we send on the wire, in a cmpctblock message. class P2PHeaderAndShortIDs: __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length", "shortids", "shortids_length") def __init__(self): self.header = CBlockHeader() self.nonce = 0 self.shortids_length = 0 self.shortids = [] self.prefilled_txn_length = 0 self.prefilled_txn = [] def deserialize(self, f): self.header.deserialize(f) self.nonce = struct.unpack("> 1 self.pubkey = deser_string(f) def serialize(self) -> bytes: r = self.utxo.serialize() height_ser = self.height << 1 | int(self.is_coinbase) r += struct.pack(' bytes: return self.stake.serialize() + self.sig class AvalancheProof: __slots__ = ( "sequence", "expiration", "master", "stakes", "payout_script", "signature", "limited_proofid", "proofid") def __init__(self, sequence=0, expiration=0, master=b"", signed_stakes=None, payout_script=b"", signature=b""): self.sequence: int = sequence self.expiration: int = expiration self.master: bytes = master self.stakes: List[AvalancheSignedStake] = signed_stakes or [ AvalancheSignedStake()] self.payout_script = payout_script self.signature = signature self.limited_proofid: int = None self.proofid: int = None self.compute_proof_id() def compute_proof_id(self): """Compute Bitcoin's 256-bit hash (double SHA-256) of the serialized proof data. 
""" ss = struct.pack(" int: return uint256_from_str(hash256( ser_uint256(self.limited_proofid) + ser_string(self.proof_master))) def deserialize(self, f): self.limited_proofid = deser_uint256(f) self.proof_master = deser_string(f) self.levels = deser_vector(f, AvalancheDelegationLevel) self.proofid = self.compute_proofid() def serialize(self): r = b"" r += ser_uint256(self.limited_proofid) r += ser_string(self.proof_master) r += ser_vector(self.levels) return r def __repr__(self): return f"AvalancheDelegation(limitedProofId={self.limited_proofid:064x}, " \ f"proofMaster={self.proof_master.hex()}, proofid={self.proofid:064x}, " \ f"levels={self.levels})" def getid(self): h = ser_uint256(self.proofid) for level in self.levels: h = hash256(h + ser_string(level.pubkey)) return h -class AvalancheHello(): +class AvalancheHello: __slots__ = ("delegation", "sig") def __init__(self, delegation=AvalancheDelegation(), sig=b"\0" * 64): self.delegation = delegation self.sig = sig def deserialize(self, f): self.delegation.deserialize(f) self.sig = f.read(64) def serialize(self): r = b"" r += self.delegation.serialize() r += self.sig return r def __repr__(self): return "AvalancheHello(delegation={}, sig={})".format( repr(self.delegation), self.sig) def get_sighash(self, node): b = self.delegation.getid() b += struct.pack(" class msg_headers: __slots__ = ("headers",) msgtype = b"headers" def __init__(self, headers=None): self.headers = headers if headers is not None else [] def deserialize(self, f): # comment in bitcoind indicates these should be deserialized as blocks blocks = deser_vector(f, CBlock) for x in blocks: self.headers.append(CBlockHeader(x)) def serialize(self): blocks = [CBlock(x) for x in self.headers] return ser_vector(blocks) def __repr__(self): return "msg_headers(headers={})".format(repr(self.headers)) class msg_merkleblock: __slots__ = ("merkleblock",) msgtype = b"merkleblock" def __init__(self, merkleblock=None): if merkleblock is None: self.merkleblock = CMerkleBlock() else: self.merkleblock = merkleblock def deserialize(self, f): self.merkleblock.deserialize(f) def serialize(self): return self.merkleblock.serialize() def __repr__(self): return "msg_merkleblock(merkleblock={})".format(repr(self.merkleblock)) class msg_filterload: __slots__ = ("data", "nHashFuncs", "nTweak", "nFlags") msgtype = b"filterload" def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0): self.data = data self.nHashFuncs = nHashFuncs self.nTweak = nTweak self.nFlags = nFlags def deserialize(self, f): self.data = deser_string(f) self.nHashFuncs = struct.unpack(" 0: d = s.recv(n) if not d: raise IOError('Unexpected end of stream') rv.extend(d) n -= len(d) return rv # Implementation classes -class Socks5Configuration(): +class Socks5Configuration: """Proxy configuration.""" def __init__(self): self.addr = None # Bind address (must be set) self.af = socket.AF_INET # Bind address family self.unauth = False # Support unauthenticated self.auth = False # Support authentication -class Socks5Command(): +class Socks5Command: """Information about an incoming socks5 command.""" def __init__(self, cmd, atyp, addr, port, username, password): self.cmd = cmd # Command (one of Command.*) self.atyp = atyp # Address type (one of AddressType.*) self.addr = addr # Address self.port = port # Port to connect to self.username = username self.password = password def __repr__(self): return 'Socks5Command({},{},{},{},{},{})'.format( self.cmd, self.atyp, self.addr, self.port, self.username, self.password) -class 
Socks5Connection(): +class Socks5Connection: def __init__(self, serv, conn): self.serv = serv self.conn = conn def handle(self): """Handle socks5 request according to RFC192.""" try: # Verify socks version ver = recvall(self.conn, 1)[0] if ver != 0x05: raise IOError('Invalid socks version {}'.format(ver)) # Choose authentication method nmethods = recvall(self.conn, 1)[0] methods = bytearray(recvall(self.conn, nmethods)) method = None if 0x02 in methods and self.serv.conf.auth: method = 0x02 # username/password elif 0x00 in methods and self.serv.conf.unauth: method = 0x00 # unauthenticated if method is None: raise IOError('No supported authentication method was offered') # Send response self.conn.sendall(bytearray([0x05, method])) # Read authentication (optional) username = None password = None if method == 0x02: ver = recvall(self.conn, 1)[0] if ver != 0x01: raise IOError('Invalid auth packet version {}'.format(ver)) ulen = recvall(self.conn, 1)[0] username = str(recvall(self.conn, ulen)) plen = recvall(self.conn, 1)[0] password = str(recvall(self.conn, plen)) # Send authentication response self.conn.sendall(bytearray([0x01, 0x00])) # Read connect request ver, cmd, _, atyp = recvall(self.conn, 4) if ver != 0x05: raise IOError( 'Invalid socks version {} in connect request'.format(ver)) if cmd != Command.CONNECT: raise IOError( 'Unhandled command {} in connect request'.format(cmd)) if atyp == AddressType.IPV4: addr = recvall(self.conn, 4) elif atyp == AddressType.DOMAINNAME: n = recvall(self.conn, 1)[0] addr = recvall(self.conn, n) elif atyp == AddressType.IPV6: addr = recvall(self.conn, 16) else: raise IOError('Unknown address type {}'.format(atyp)) port_hi, port_lo = recvall(self.conn, 2) port = (port_hi << 8) | port_lo # Send dummy response self.conn.sendall( bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) cmdin = Socks5Command(cmd, atyp, addr, port, username, password) self.serv.queue.put(cmdin) logger.info('Proxy: {}'.format(cmdin)) # Fall through to disconnect except Exception as e: logger.exception("socks5 request handling failed.") self.serv.queue.put(e) finally: self.conn.close() -class Socks5Server(): +class Socks5Server: def __init__(self, conf): self.conf = conf self.s = socket.socket(conf.af) self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.s.bind(conf.addr) self.s.listen(5) self.running = False self.thread = None self.queue = queue.Queue() # report connections and exceptions to client def run(self): while self.running: (sockconn, _) = self.s.accept() if self.running: conn = Socks5Connection(self, sockconn) thread = threading.Thread(None, conn.handle) thread.daemon = True thread.start() def start(self): assert not self.running self.running = True self.thread = threading.Thread(None, self.run) self.thread.daemon = True self.thread.start() def stop(self): self.running = False # connect to self to end run loop s = socket.socket(self.conf.af) s.connect(self.conf.addr) s.close() self.thread.join() diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 8353bc906..1ec5fc9aa 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -1,956 +1,956 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Class for bitcoind node under test""" import collections import contextlib import decimal import errno import http.client import json import logging import os import re import shlex import subprocess import sys import tempfile import time import urllib.parse from enum import Enum from .authproxy import JSONRPCException from .descriptors import descsum_create from .messages import XEC, CTransaction, FromHex from .p2p import P2P_SUBVERSION from .util import ( EncodeDecimal, append_config, assert_equal, delete_cookie_file, get_auth_cookie, get_rpc_proxy, p2p_port, rpc_url, wait_until_helper, ) BITCOIND_PROC_WAIT_TIMEOUT = 60 class FailedToStartError(Exception): """Raised when a node fails to start correctly.""" class ErrorMatch(Enum): FULL_TEXT = 1 FULL_REGEX = 2 PARTIAL_REGEX = 3 -class TestNode(): +class TestNode: """A class for representing a bitcoind node under test. This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, datadir, *, chain, host, rpc_port, p2p_port, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, emulator=None, start_perf=False, use_valgrind=False, descriptors=False): """ Kwargs: start_perf (bool): If True, begin profiling the node with `perf` as soon as the node starts. """ self.index = i self.p2p_conn_index = 1 self.datadir = datadir self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf") self.stdout_dir = os.path.join(self.datadir, "stdout") self.stderr_dir = os.path.join(self.datadir, "stderr") self.chain = chain self.host = host self.rpc_port = rpc_port self.p2p_port = p2p_port self.name = "testnode-{}".format(i) self.rpc_timeout = timewait self.binary = bitcoind if not os.path.isfile(self.binary): raise FileNotFoundError( "Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOIND= {}".format(self.binary, sys.argv[0])) self.coverage_dir = coverage_dir self.cwd = cwd self.descriptors = descriptors if extra_conf is not None: append_config(datadir, extra_conf) # Most callers will just need to add extra args to the default list # below. # For those callers that need more flexibility, they can access the # default args using the provided facilities. # Note that common args are set in the config file (see # initialize_datadir) self.extra_args = extra_args # Configuration for logging is set as command-line args rather than in the bitcoin.conf file. # This means that starting a bitcoind using the temp dir to debug a failed test won't # spam debug.log. 
self.default_args = [ "-datadir=" + self.datadir, "-logtimemicros", "-logthreadnames", "-logsourcelocations", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-uacomment=" + self.name, "-noprinttoconsole", ] if use_valgrind: default_suppressions_file = os.path.join( os.path.dirname(os.path.realpath(__file__)), "..", "..", "..", "contrib", "valgrind.supp") suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE", default_suppressions_file) self.binary = "valgrind" self.bitcoind_args = [bitcoind] + self.default_args self.default_args = ["--suppressions={}".format(suppressions_file), "--gen-suppressions=all", "--exit-on-first-error=yes", "--error-exitcode=1", "--quiet"] + self.bitcoind_args if emulator is not None: if not os.path.isfile(emulator): raise FileNotFoundError( "Emulator '{}' could not be found.".format(emulator)) self.emulator = emulator if use_cli and not os.path.isfile(bitcoin_cli): raise FileNotFoundError( "Binary '{}' could not be found.\nTry setting it manually:\n\tBITCOINCLI= {}".format(bitcoin_cli, sys.argv[0])) self.cli = TestNodeCLI(bitcoin_cli, self.datadir, self.emulator) self.use_cli = use_cli self.start_perf = start_perf self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.relay_fee_cache = None self.log = logging.getLogger('TestFramework.node{}'.format(i)) # Whether to kill the node when this object goes away self.cleanup_on_exit = True # Cache perf subprocesses here by their data output filename. self.perf_subprocesses = {} self.p2ps = [] self.timeout_factor = timeout_factor AddressKeyPair = collections.namedtuple( 'AddressKeyPair', ['address', 'key']) PRIV_KEYS = [ # address , privkey AddressKeyPair( 'mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), AddressKeyPair( 'msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), AddressKeyPair( 'mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), AddressKeyPair( 'mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), AddressKeyPair( 'msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), AddressKeyPair( 'n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), AddressKeyPair( 'myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), AddressKeyPair( 'mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), AddressKeyPair( 'mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), AddressKeyPair( 'mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), AddressKeyPair( 'mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), AddressKeyPair( 'mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'), ] def get_deterministic_priv_key(self): """Return a deterministic priv key in base58, that only depends on the node's index""" num_keys = len(self.PRIV_KEYS) assert self.index < num_keys, f"Only {num_keys} keys are defined, please extend TestNode.PRIV_KEYS if more are needed." 
return self.PRIV_KEYS[self.index] def _node_msg(self, msg: str) -> str: """Return a modified msg that identifies this node by its index as a debugging aid.""" return "[node {}] {}".format(self.index, msg) def _raise_assertion_error(self, msg: str): """Raise an AssertionError with msg modified to identify this node.""" raise AssertionError(self._node_msg(msg)) def __del__(self): # Ensure that we don't leave any bitcoind processes lying around after # the test ends if self.process and self.cleanup_on_exit: # Should only happen on test failure # Avoid using logger, as that may have already been shutdown when # this destructor is called. print(self._node_msg("Cleaning up leftover process")) self.process.kill() def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: return getattr( RPCOverloadWrapper(self.cli, True, self.descriptors), name) else: assert self.rpc is not None, self._node_msg( "Error: RPC not initialized") assert self.rpc_connected, self._node_msg( "Error: No RPC connection") return getattr( RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name) def clear_default_args(self): self.default_args.clear() def extend_default_args(self, args): self.default_args.extend(args) def remove_default_args(self, args): for rm_arg in args: # Remove all occurrences of rm_arg in self.default_args: # - if the arg is a flag (-flag), then the names must match # - if the arg is a value (-key=value) then the name must starts # with "-key=" (the '"' char is to avoid removing "-key_suffix" # arg is "-key" is the argument to remove). self.default_args = [def_arg for def_arg in self.default_args if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')] def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args # Add a new stdout and stderr file each time bitcoind is started if stderr is None: stderr = tempfile.NamedTemporaryFile( dir=self.stderr_dir, delete=False) if stdout is None: stdout = tempfile.NamedTemporaryFile( dir=self.stdout_dir, delete=False) self.stderr = stderr self.stdout = stdout if cwd is None: cwd = self.cwd # Delete any existing cookie file -- if such a file exists (eg due to # unclean shutdown), it will get overwritten anyway by bitcoind, and # potentially interfere with our attempt to authenticate delete_cookie_file(self.datadir, self.chain) # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are # written to stderr and not the terminal subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") p_args = [self.binary] + self.default_args + extra_args if self.emulator is not None: p_args = [self.emulator] + p_args self.process = subprocess.Popen( p_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up") if self.start_perf: self._start_perf() def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoind process. 
Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: raise FailedToStartError(self._node_msg( 'bitcoind exited with status {} during initialization'.format(self.process.returncode))) try: rpc = get_rpc_proxy( rpc_url( self.datadir, self.chain, self.host, self.rpc_port), self.index, # Shorter timeout to allow for one retry in case of # ETIMEDOUT timeout=self.rpc_timeout // 2, coveragedir=self.coverage_dir ) rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC # connection is up wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor) # Wait for the node to finish reindex, block import, and # loading the mempool. Usually importing happens fast or # even "immediate" when the node is started. However, there # is no guarantee and sometimes ThreadImport might finish # later. This is going to cause intermittent test failures, # because generally the tests assume the node is fully # ready after being started. # # For example, the node will reject block messages from p2p # when it is still importing with the error "Unexpected # block message received" # # The wait is done here to make tests as robust as possible # and prevent racy tests and intermittent failures as much # as possible. Some tests might not need this, but the # overhead is trivial, and the added guarantees are worth # the minimal performance cost. self.log.debug("RPC successfully started") if self.use_cli: return self.rpc = rpc self.rpc_connected = True self.url = self.rpc.url return except JSONRPCException as e: # Initialization phase # -28 RPC in warmup # -342 Service unavailable, RPC server started but is shutting down due to error if e.error['code'] != -28 and e.error['code'] != -342: raise # unknown JSON RPC exception except ConnectionResetError: # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount # succeeds. Try again to properly raise the FailedToStartError pass except OSError as e: if e.errno == errno.ETIMEDOUT: # Treat identical to ConnectionResetError pass elif e.errno == errno.ECONNREFUSED: # Port not yet open? pass else: # unknown OS error raise except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; # bitcoind is still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) self._raise_assertion_error( "Unable to connect to bitcoind after {}s".format( self.rpc_timeout)) def wait_for_cookie_credentials(self): """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up.""" self.log.debug("Waiting for cookie credentials") # Poll at a rate of four times per second. 
poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): try: get_auth_cookie(self.datadir, self.chain) self.log.debug("Cookie credentials successfully retrieved") return except ValueError: # cookie file not found and no rpcuser or rpcpassword; # bitcoind is still starting so we continue polling until # RPC credentials are retrieved pass time.sleep(1.0 / poll_per_s) self._raise_assertion_error( "Unable to retrieve cookie credentials after {}s".format( self.rpc_timeout)) def generate(self, nblocks, maxtries=1000000): self.log.debug( "TestNode.generate() dispatches `generate` call to `generatetoaddress`") return self.generatetoaddress( nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries) def get_wallet_rpc(self, wallet_name): if self.use_cli: return RPCOverloadWrapper( self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors) else: assert self.rpc is not None, self._node_msg( "Error: RPC not initialized") assert self.rpc_connected, self._node_msg( "Error: RPC not connected") wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name)) return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors) def stop_node(self, expected_stderr='', *, wait=0, wait_until_stopped=True): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop(wait=wait) except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") # If there are any running perf processes, stop them. for profile_name in tuple(self.perf_subprocesses.keys()): self._stop_perf(profile_name) # Check that stderr is as expected self.stderr.seek(0) stderr = self.stderr.read().decode('utf-8').strip() if stderr != expected_stderr: raise AssertionError( "Unexpected stderr {} != {}".format(stderr, expected_stderr)) self.stdout.close() self.stderr.close() del self.p2ps[:] if wait_until_stopped: self.wait_until_stopped() def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert return_code == 0, self._node_msg( "Node returned non-zero exit code ({}) when stopping".format(return_code)) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until_helper( self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor) @contextlib.contextmanager def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2): """Assert that some debug messages are present within some timeout. Unexpected debug messages may be optionally provided to fail a test if they appear before expected messages. Note: expected_msgs must always be non-empty even if the goal is to check for unexpected_msgs. This provides a bounded scenario such that "we expect to reach some target resulting in expected_msgs without seeing unexpected_msgs. Otherwise, we are testing that something never happens, which is fundamentally not robust test logic. 
""" if not expected_msgs: raise AssertionError("Expected debug messages is empty") if unexpected_msgs is None: unexpected_msgs = [] time_end = time.time() + timeout * self.timeout_factor debug_log = os.path.join(self.datadir, self.chain, 'debug.log') with open(debug_log, encoding='utf-8') as dl: dl.seek(0, 2) prev_size = dl.tell() yield while True: found = True with open(debug_log, encoding='utf-8') as dl: dl.seek(prev_size) log = dl.read() print_log = " - " + "\n - ".join(log.splitlines()) for unexpected_msg in unexpected_msgs: if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE): self._raise_assertion_error( 'Unexpected message "{}" partially matches log:\n\n{}\n\n'.format( unexpected_msg, print_log)) for expected_msg in expected_msgs: if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: found = False if found: return if time.time() >= time_end: break time.sleep(0.05) self._raise_assertion_error( 'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format( str(expected_msgs), print_log)) @contextlib.contextmanager def profile_with_perf(self, profile_name: str): """ Context manager that allows easy profiling of node activity using `perf`. See `test/functional/README.md` for details on perf usage. Args: profile_name: This string will be appended to the profile data filename generated by perf. """ subp = self._start_perf(profile_name) yield if subp: self._stop_perf(profile_name) def _start_perf(self, profile_name=None): """Start a perf process to profile this node. Returns the subprocess running perf.""" subp = None def test_success(cmd): return subprocess.call( # shell=True required for pipe use below cmd, shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0 if not sys.platform.startswith('linux'): self.log.warning( "Can't profile with perf; only availabe on Linux platforms") return None if not test_success('which perf'): self.log.warning( "Can't profile with perf; must install perf-tools") return None if not test_success( 'readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))): self.log.warning( "perf output won't be very useful without debug symbols compiled into bitcoind") output_path = tempfile.NamedTemporaryFile( dir=self.datadir, prefix="{}.perf.data.".format(profile_name or 'test'), delete=False, ).name cmd = [ 'perf', 'record', '-g', # Record the callgraph. # Compatibility for gcc's --fomit-frame-pointer. '--call-graph', 'dwarf', '-F', '101', # Sampling frequency in Hz. '-p', str(self.process.pid), '-o', output_path, ] subp = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.perf_subprocesses[profile_name] = subp return subp def _stop_perf(self, profile_name): """Stop (and pop) a perf subprocess.""" subp = self.perf_subprocesses.pop(profile_name) output_path = subp.args[subp.args.index('-o') + 1] subp.terminate() subp.wait(timeout=10) stderr = subp.stderr.read().decode() if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr: self.log.warning( "perf couldn't collect data! Try " "'sudo sysctl -w kernel.perf_event_paranoid=-1'") else: report_cmd = "perf report -i {}".format(output_path) self.log.info("See perf output by running '{}'".format(report_cmd)) def assert_start_raises_init_error( self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): """Attempt to start the node and expect it to raise an error. 
extra_args: extra arguments to pass through to bitcoind expected_msg: regex that stderr should match when bitcoind fails Will throw if bitcoind starts without an error. Will throw if an expected_msg is provided and it does not match bitcoind's stdout.""" with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \ tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout: try: self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs) ret = self.process.wait(timeout=self.rpc_timeout) self.log.debug(self._node_msg( f'bitcoind exited with status {ret} during initialization')) self.running = False self.process = None # Check stderr for expected message if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8').strip() if match == ErrorMatch.PARTIAL_REGEX: if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: self._raise_assertion_error( 'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_REGEX: if re.fullmatch(expected_msg, stderr) is None: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_TEXT: if expected_msg != stderr: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) except subprocess.TimeoutExpired: self.process.kill() self.running = False self.process = None assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s ' if expected_msg is None: assert_msg += "with an error" else: assert_msg += "with expected error " + expected_msg self._raise_assertion_error(assert_msg) def relay_fee(self, cached=True): if not self.relay_fee_cache or not cached: self.relay_fee_cache = self.getnetworkinfo()["relayfee"] return self.relay_fee_cache def calculate_fee(self, tx): """ Estimate the necessary fees (in sats) for an unsigned CTransaction assuming: - the current relayfee on node - all inputs are compressed-key p2pkh, and will be signed ecdsa or schnorr - all inputs currently unsigned (empty scriptSig) """ billable_size_estimate = tx.billable_size() # Add some padding for signatures / public keys # 107 = length of PUSH(longest_sig = 72 bytes), PUSH(pubkey = 33 bytes) billable_size_estimate += len(tx.vin) * 107 # relay_fee gives a value in XEC per kB. return int(self.relay_fee() / 1000 * billable_size_estimate * XEC) def calculate_fee_from_txid(self, txid): ctx = FromHex(CTransaction(), self.getrawtransaction(txid)) return self.calculate_fee(ctx) def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs): """Add an inbound p2p connection to the node. This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' p2p_conn.peer_connect( **kwargs, net=self.chain, timeout_factor=self.timeout_factor)() self.p2ps.append(p2p_conn) p2p_conn.wait_until( lambda: p2p_conn.is_connected, check_connected=False) if wait_for_verack: # Wait for the node to send us the version and verack p2p_conn.wait_for_verack() # At this point we have sent our version message and received the version and verack, however the full node # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully # established (fSuccessfullyConnected). 
# # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a # transaction that will be added to the mempool as soon as we return here. # # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds) # in comparison to the upside of making tests less fragile and # unexpected intermittent errors less likely. p2p_conn.sync_with_ping() # Consistency check that the Bitcoin ABC has received our user agent # string. This checks the node's newest peer. It could be racy if # another Bitcoin ABC node has connected since we opened our # connection, but we don't expect that to happen. assert_equal(self.getpeerinfo()[-1]['subver'], P2P_SUBVERSION) return p2p_conn def add_outbound_p2p_connection( self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs): """Add an outbound p2p connection from node. Must be an "outbound-full-relay", "block-relay-only", "addr-fetch", "feeler" or "avalanche" connection. This method adds the p2p connection to the self.p2ps list and returns the connection to the caller. """ def addconnection_callback(address, port): self.log.debug( "Connecting to {}:{} {}".format(address, port, connection_type)) self.addconnection('{}:{}'.format(address, port), connection_type) p2p_conn.peer_accept_connection( connect_cb=addconnection_callback, connect_id=p2p_idx + 1, net=self.chain, timeout_factor=self.timeout_factor, **kwargs)() if connection_type == "feeler": # feeler connections are closed as soon as the node receives a # `version` message p2p_conn.wait_until( lambda: p2p_conn.message_count["version"] == 1, check_connected=False) p2p_conn.wait_until( lambda: not p2p_conn.is_connected, check_connected=False) else: p2p_conn.wait_for_connect() self.p2ps.append(p2p_conn) p2p_conn.wait_for_verack() p2p_conn.sync_with_ping() return p2p_conn def num_test_p2p_connections(self): """Return number of test framework p2p connections to the node.""" return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION]) def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor) class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) def arg_to_cli(arg): if isinstance(arg, bool): return str(arg).lower() elif arg is None: return 'null' elif isinstance(arg, dict) or isinstance(arg, list): return json.dumps(arg, default=EncodeDecimal) else: return str(arg) -class TestNodeCLI(): +class TestNodeCLI: """Interface to bitcoin-cli for an individual node""" def __init__(self, binary, datadir, emulator=None): self.options = [] self.binary = binary self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.bitcoincli') self.emulator = emulator def __call__(self, *options, input=None): # TestNodeCLI is callable with bitcoin-cli command-line options cli = TestNodeCLI(self.binary, self.datadir, self.emulator) cli.options = [str(o) for o in options] cli.input = input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for 
request in requests: try: results.append(dict(result=request())) except JSONRPCException as e: results.append(dict(error=e)) return results def send_cli(self, command=None, *args, **kwargs): """Run bitcoin-cli command. Deserializes returned string as python object.""" pos_args = [arg_to_cli(arg) for arg in args] named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()] assert not ( pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] if command is not None: p_args += [command] p_args += pos_args + named_args self.log.debug("Running bitcoin-cli {}".format(p_args[2:])) if self.emulator is not None: p_args = [self.emulator] + p_args process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match( r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError( returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except (json.JSONDecodeError, decimal.InvalidOperation): return cli_stdout.rstrip("\n") -class RPCOverloadWrapper(): +class RPCOverloadWrapper: def __init__(self, rpc, cli=False, descriptors=False): self.rpc = rpc self.is_cli = cli self.descriptors = descriptors def __getattr__(self, name): return getattr(self.rpc, name) def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None): if descriptors is None: descriptors = self.descriptors return self.__getattr__('createwallet')( wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup) def importprivkey(self, privkey, label=None, rescan=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ( 'descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('importprivkey')(privkey, label, rescan) desc = descsum_create('combo(' + privkey + ')') req = [{ 'desc': desc, 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }] import_res = self.importdescriptors(req) if not import_res[0]['success']: raise JSONRPCException(import_res[0]['error']) def addmultisigaddress(self, nrequired, keys, label=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ( 'descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('addmultisigaddress')( nrequired, keys, label) cms = self.createmultisig(nrequired, keys) req = [{ 'desc': cms['descriptor'], 'timestamp': 0, 'label': label if label else '' }] import_res = self.importdescriptors(req) if not import_res[0]['success']: raise JSONRPCException(import_res[0]['error']) return cms def importpubkey(self, pubkey, label=None, rescan=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ( 'descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('importpubkey')(pubkey, label, rescan) desc = descsum_create('combo(' + pubkey + ')') req = [{ 'desc': desc, 'timestamp': 0 if rescan else 'now', 'label': label if label 
else '' }] import_res = self.importdescriptors(req) if not import_res[0]['success']: raise JSONRPCException(import_res[0]['error']) def importaddress(self, address, label=None, rescan=None, p2sh=None): wallet_info = self.getwalletinfo() if 'descriptors' not in wallet_info or ( 'descriptors' in wallet_info and not wallet_info['descriptors']): return self.__getattr__('importaddress')( address, label, rescan, p2sh) is_hex = False try: int(address, 16) is_hex = True desc = descsum_create('raw(' + address + ')') except BaseException: desc = descsum_create('addr(' + address + ')') reqs = [{ 'desc': desc, 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }] if is_hex and p2sh: reqs.append({ 'desc': descsum_create('p2sh(raw(' + address + '))'), 'timestamp': 0 if rescan else 'now', 'label': label if label else '' }) import_res = self.importdescriptors(reqs) for res in import_res: if not res['success']: raise JSONRPCException(res['error']) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 31b731b5f..fc1dce077 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -1,921 +1,921 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`. """ import argparse import configparser import datetime import json import logging import multiprocessing import os import re import shutil import subprocess import sys import tempfile import threading import time import unittest import xml.etree.ElementTree as ET from collections import deque from queue import Empty, Queue # Formatting. Default colors to empty strings. 
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) TICK = "✓ " CROSS = "✖ " CIRCLE = "○ " except UnicodeDecodeError: TICK = "P " CROSS = "x " CIRCLE = "o " if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393): # type: ignore if os.name == 'nt': import ctypes kernel32 = ctypes.windll.kernel32 # type: ignore ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4 STD_OUTPUT_HANDLE = -11 STD_ERROR_HANDLE = -12 # Enable ascii color control to stdout stdout = kernel32.GetStdHandle(STD_OUTPUT_HANDLE) stdout_mode = ctypes.c_int32() kernel32.GetConsoleMode(stdout, ctypes.byref(stdout_mode)) kernel32.SetConsoleMode( stdout, stdout_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) # Enable ascii color control to stderr stderr = kernel32.GetStdHandle(STD_ERROR_HANDLE) stderr_mode = ctypes.c_int32() kernel32.GetConsoleMode(stderr, ctypes.byref(stderr_mode)) kernel32.SetConsoleMode( stderr, stderr_mode.value | ENABLE_VIRTUAL_TERMINAL_PROCESSING) # primitive formatting on supported # terminal via ANSI escape sequences: BOLD = ('\033[0m', '\033[1m') GREEN = ('\033[0m', '\033[0;32m') RED = ('\033[0m', '\033[0;31m') GREY = ('\033[0m', '\033[1;30m') TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 TEST_FRAMEWORK_MODULES = [ "address", "blocktools", "messages", "muhash", "script", "txtools", "util", ] NON_SCRIPTS = [ # These are python files that live in the functional tests directory, but # are not test scripts. "combine_logs.py", "create_cache.py", "test_runner.py", ] TEST_PARAMS = { # Some test can be run with additional parameters. # When a test is listed here, the it will be run without parameters # as well as with additional parameters listed here. # This: # example "testName" : [["--param1", "--param2"] , ["--param3"]] # will run the test 3 times: # testName # testName --param1 --param2 # testname --param3 "rpc_bind.py": [["--ipv4"], ["--ipv6"], ["--nonloopback"]], "rpc_createmultisig.py": [["--descriptors"]], "rpc_deriveaddresses.py": [["--usecli"]], "rpc_fundrawtransaction.py": [["--descriptors"]], "rpc_rawtransaction.py": [["--descriptors"]], "rpc_signrawtransaction.py": [["--descriptors"]], # FIXME: "rpc_psbt.py": [["--descriptors"]], "wallet_address_types.py": [["--descriptors"]], "tool_wallet.py": [["--descriptors"]], "wallet_avoidreuse.py": [["--descriptors"]], "wallet_balance.py": [["--descriptors"]], # FIXME: "wallet_basic.py": [["--descriptors"]], "wallet_createwallet.py": [["--usecli"], ["--descriptors"]], "wallet_encryption.py": [["--descriptors"]], "wallet_hd.py": [["--descriptors"]], "wallet_importprunedfunds.py": [["--descriptors"]], # FIXME: "wallet_keypool.py": [["--descriptors"]], "wallet_keypool_topup.py": [["--descriptors"]], "wallet_labels.py": [["--descriptors"]], "wallet_listsinceblock.py": [["--descriptors"]], "wallet_listtransactions.py": [["--descriptors"]], "wallet_multiwallet.py": [["--usecli"]], "wallet_txn_doublespend.py": [["--mineblock"]], "wallet_txn_clone.py": [["--mineblock"]], "wallet_watchonly.py": [["--usecli"]], } # Used to limit the number of tests, when list of tests is not provided on command line # When --extended is specified, we run all tests, otherwise # we only run a test if its execution time in seconds does not exceed # EXTENDED_CUTOFF DEFAULT_EXTENDED_CUTOFF = 40 DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1 -class TestCase(): +class TestCase: """ Data structure to hold and run information necessary to launch a test case. 
""" def __init__(self, test_num, test_case, tests_dir, tmpdir, failfast_event, flags=None): self.tests_dir = tests_dir self.tmpdir = tmpdir self.test_case = test_case self.test_num = test_num self.failfast_event = failfast_event self.flags = flags def run(self): if self.failfast_event.is_set(): return TestResult(self.test_num, self.test_case, "", "Skipped", 0, "", "") portseed = self.test_num portseed_arg = ["--portseed={}".format(portseed)] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) test_argv = self.test_case.split() testdir = os.path.join("{}", "{}_{}").format( self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed) tmpdir_arg = ["--tmpdir={}".format(testdir)] start_time = time.time() process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg, universal_newlines=True, stdout=log_stdout, stderr=log_stderr) process.wait() log_stdout.seek(0), log_stderr.seek(0) [stdout, stderr] = [log.read().decode('utf-8') for log in (log_stdout, log_stderr)] log_stdout.close(), log_stderr.close() if process.returncode == TEST_EXIT_PASSED and stderr == "": status = "Passed" elif process.returncode == TEST_EXIT_SKIPPED: status = "Skipped" else: status = "Failed" return TestResult(self.test_num, self.test_case, testdir, status, time.time() - start_time, stdout, stderr) def on_ci(): return os.getenv('TRAVIS') == 'true' or os.getenv( 'TEAMCITY_VERSION') is not None def main(): # Read config generated by configure. config = configparser.ConfigParser() configfile = os.path.join(os.path.abspath( os.path.dirname(__file__)), "..", "config.ini") config.read_file(open(configfile, encoding="utf8")) src_dir = config["environment"]["SRCDIR"] build_dir = config["environment"]["BUILDDIR"] tests_dir = os.path.join(src_dir, 'test', 'functional') # SRCDIR must be set for cdefs.py to find and parse consensus.h os.environ["SRCDIR"] = src_dir # Parse arguments and pass through unrecognised args parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__, epilog=''' Help text and arguments for individual test script:''', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--combinedlogslen', '-c', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to ' 'the console, combined from the test framework ' 'and all test nodes.') parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') parser.add_argument( '--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.') parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF, help='set the cutoff runtime for what tests get run') parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit') parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS, help='how many test scripts to run in parallel.') parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. 
--keepcache retains the cache from the previous testrun.') parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs') parser.add_argument('--tmpdirprefix', '-t', default=os.path.join(build_dir, 'test', 'tmp'), help="Root directory for datadirs") parser.add_argument( '--failfast', action='store_true', help='stop execution after the first test failure') parser.add_argument('--junitoutput', '-J', help="File that will store JUnit formatted test results. If no absolute path is given it is treated as relative to the temporary directory.") parser.add_argument('--testsuitename', '-n', default='Bitcoin ABC functional tests', help="Name of the test suite, as it will appear in the logs and in the JUnit report.") args, unknown_args = parser.parse_known_args() # args to be passed on always start with two dashes; tests are the # remaining unknown args tests = [arg for arg in unknown_args if arg[:2] != "--"] passon_args = [arg for arg in unknown_args if arg[:2] == "--"] passon_args.append("--configfile={}".format(configfile)) # Set up logging logging_level = logging.INFO if args.quiet else logging.DEBUG logging.basicConfig(format='%(message)s', level=logging_level) logging.info("Starting {}".format(args.testsuitename)) # Create base test directory tmpdir = os.path.join("{}", "test_runner_₿₵_🏃_{:%Y%m%d_%H%M%S}").format( args.tmpdirprefix, datetime.datetime.now()) os.makedirs(tmpdir) logging.debug("Temporary test directory at {}".format(tmpdir)) if args.junitoutput and not os.path.isabs(args.junitoutput): args.junitoutput = os.path.join(tmpdir, args.junitoutput) enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND") if not enable_bitcoind: print("No functional tests to run.") print("Rerun ./configure with --with-daemon and then make") sys.exit(0) # Build list of tests all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS) # Check all tests with parameters actually exist for test in TEST_PARAMS: if test not in all_scripts: print("ERROR: Test with parameter {} does not exist, check it has " "not been renamed or deleted".format(test)) sys.exit(1) if tests: # Individual tests have been specified. Run specified tests that exist # in the all_scripts list. Accept the name with or without .py # extension. individual_tests = [ re.sub(r"\.py$", "", test) + ".py" for test in tests if not test.endswith('*')] test_list = [] for test in individual_tests: if test in all_scripts: test_list.append(test) else: print("{}WARNING!{} Test '{}' not found in full test list.".format( BOLD[1], BOLD[0], test)) # Allow for wildcard at the end of the name, so a single input can # match multiple tests for test in tests: if test.endswith('*'): test_list.extend( [t for t in all_scripts if t.startswith(test[:-1])]) # do not cut off explicitly specified tests cutoff = sys.maxsize else: # Run base tests only test_list = all_scripts cutoff = sys.maxsize if args.extended else args.cutoff # Remove the test cases that the user has explicitly asked to exclude. if args.exclude: exclude_tests = [re.sub(r"\.py$", "", test) + (".py" if ".py" not in test else "") for test in args.exclude.split(',')] for exclude_test in exclude_tests: if exclude_test in test_list: test_list.remove(exclude_test) else: print("{}WARNING!{} Test '{}' not found in current test list.".format( BOLD[1], BOLD[0], exclude_test)) # Update timings from build_dir only if separate build directory is used. # We do not want to pollute source directory. 
build_timings = None if (src_dir != build_dir): build_timings = Timings(os.path.join(build_dir, 'timing.json')) # Always use timings from src_dir if present src_timings = Timings(os.path.join( src_dir, "test", "functional", 'timing.json')) # Add test parameters and remove long running tests if needed test_list = get_tests_to_run( test_list, TEST_PARAMS, cutoff, src_timings) if not test_list: print("No valid test scripts specified. Check that your test is in one " "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") sys.exit(0) if args.help: # Print help for test_runner.py, then print help of the first script # and exit. parser.print_help() subprocess.check_call( [sys.executable, os.path.join(tests_dir, test_list[0]), '-h']) sys.exit(0) check_script_prefixes(all_scripts) if not args.keepcache: shutil.rmtree(os.path.join(build_dir, "test", "cache"), ignore_errors=True) run_tests( test_list, build_dir, tests_dir, args.junitoutput, tmpdir, num_jobs=args.jobs, test_suite_name=args.testsuitename, enable_coverage=args.coverage, args=passon_args, combined_logs_len=args.combinedlogslen, build_timings=build_timings, failfast=args.failfast ) def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name, enable_coverage=False, args=None, combined_logs_len=0, build_timings=None, failfast=False): args = args or [] # Warn if bitcoind is already running try: # pgrep exits with code zero when one or more matching processes found if subprocess.run(["pgrep", "-x", "bitcoind"], stdout=subprocess.DEVNULL).returncode == 0: print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format( BOLD[1], BOLD[0])) except OSError: # pgrep not supported pass # Warn if there is a cache directory cache_dir = os.path.join(build_dir, "test", "cache") if os.path.isdir(cache_dir): print("{}WARNING!{} There is a cache directory here: {}. 
If tests fail unexpectedly, try deleting the cache directory.".format( BOLD[1], BOLD[0], cache_dir)) # Test Framework Tests print("Running Unit Tests for Test Framework Modules") test_framework_tests = unittest.TestSuite() for module in TEST_FRAMEWORK_MODULES: test_framework_tests.addTest( unittest.TestLoader().loadTestsFromName( "test_framework.{}".format(module))) result = unittest.TextTestRunner( verbosity=1, failfast=True).run(test_framework_tests) if not result.wasSuccessful(): logging.debug( "Early exiting after failure in TestFramework unit tests") sys.exit(False) flags = ['--cachedir={}'.format(cache_dir)] + args if enable_coverage: coverage = RPCCoverage() flags.append(coverage.flag) logging.debug( "Initializing coverage directory at {}".format(coverage.dir)) else: coverage = None if len(test_list) > 1 and num_jobs > 1: # Populate cache try: subprocess.check_output([sys.executable, os.path.join( tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)]) except subprocess.CalledProcessError as e: sys.stdout.buffer.write(e.output) raise # Run Tests start_time = time.time() test_results = execute_test_processes( num_jobs, test_list, tests_dir, tmpdir, flags, failfast) runtime = time.time() - start_time max_len_name = len(max(test_list, key=len)) print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len) if junitoutput is not None: save_results_as_junit( test_results, junitoutput, runtime, test_suite_name) if (build_timings is not None): build_timings.save_timings(test_results) if coverage: coverage_passed = coverage.report_rpc_coverage() logging.debug("Cleaning up coverage data") coverage.cleanup() else: coverage_passed = True # Clear up the temp directory if all subdirectories are gone if not os.listdir(tmpdir): os.rmdir(tmpdir) all_passed = all(map( lambda test_result: test_result.was_successful, test_results)) and coverage_passed sys.exit(not all_passed) def execute_test_processes( num_jobs, test_list, tests_dir, tmpdir, flags, failfast=False): update_queue = Queue() job_queue = Queue() failfast_event = threading.Event() test_results = [] poll_timeout = 10 # seconds ## # Define some helper functions we will need for threading. ## def handle_message(message, running_jobs): """ handle_message handles a single message from handle_test_cases """ if isinstance(message, TestCase): running_jobs.append((message.test_num, message.test_case)) print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0])) return if isinstance(message, TestResult): test_result = message running_jobs.remove((test_result.num, test_result.name)) test_results.append(test_result) if test_result.status == "Passed": print("{}{}{} passed, Duration: {} s".format( BOLD[1], test_result.name, BOLD[0], TimeResolution.seconds(test_result.time))) elif test_result.status == "Skipped": print("{}{}{} skipped".format( BOLD[1], test_result.name, BOLD[0])) else: print("{}{}{} failed, Duration: {} s\n".format( BOLD[1], test_result.name, BOLD[0], TimeResolution.seconds(test_result.time))) print(BOLD[1] + 'stdout:' + BOLD[0]) print(test_result.stdout) print(BOLD[1] + 'stderr:' + BOLD[0]) print(test_result.stderr) if failfast: logging.debug("Early exiting after test failure") failfast_event.set() return assert False, "we should not be here" def handle_update_messages(): """ handle_update_messages waits for messages to be sent from handle_test_cases via the update_queue. It serializes the results so we can print nice status update messages. 
""" printed_status = False running_jobs = [] while True: message = None try: message = update_queue.get(True, poll_timeout) if message is None: break # We printed a status message, need to kick to the next line # before printing more. if printed_status: print() printed_status = False handle_message(message, running_jobs) update_queue.task_done() except Empty: if not on_ci(): print("Running jobs: {}".format( ", ".join([j[1] for j in running_jobs])), end="\r") sys.stdout.flush() printed_status = True def handle_test_cases(): """ job_runner represents a single thread that is part of a worker pool. It waits for a test, then executes that test. It also reports start and result messages to handle_update_messages """ while True: test = job_queue.get() if test is None: break # Signal that the test is starting to inform the poor waiting # programmer update_queue.put(test) result = test.run() update_queue.put(result) job_queue.task_done() ## # Setup our threads, and start sending tasks ## # Start our result collection thread. resultCollector = threading.Thread(target=handle_update_messages) resultCollector.daemon = True resultCollector.start() # Start some worker threads for _ in range(num_jobs): t = threading.Thread(target=handle_test_cases) t.daemon = True t.start() # Push all our test cases into the job queue. for i, t in enumerate(test_list): job_queue.put(TestCase(i, t, tests_dir, tmpdir, failfast_event, flags)) # Wait for all the jobs to be completed job_queue.join() # Wait for all the results to be compiled update_queue.join() # Flush our queues so the threads exit update_queue.put(None) for _ in range(num_jobs): job_queue.put(None) return test_results def print_results(test_results, tests_dir, max_len_name, runtime, combined_logs_len): results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format( "TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0] test_results.sort(key=TestResult.sort_key) all_passed = True time_sum = 0 for test_result in test_results: all_passed = all_passed and test_result.was_successful time_sum += test_result.time test_result.padding = max_len_name results += str(test_result) testdir = test_result.testdir if combined_logs_len and os.path.isdir(testdir): # Print the final `combinedlogslen` lines of the combined logs print('{}Combine the logs and print the last {} lines ...{}'.format( BOLD[1], combined_logs_len, BOLD[0])) print('\n============') print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0])) print('============\n') combined_logs_args = [ sys.executable, os.path.join( tests_dir, 'combine_logs.py'), testdir] if BOLD[0]: combined_logs_args += ['--color'] combined_logs, _ = subprocess.Popen( combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate() print( "\n".join( deque( combined_logs.splitlines(), combined_logs_len))) status = TICK + "Passed" if all_passed else CROSS + "Failed" if not all_passed: results += RED[1] results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format( "ALL".ljust(max_len_name), status.ljust(9), TimeResolution.seconds(time_sum)) + BOLD[0] if not all_passed: results += RED[0] results += "Runtime: {} s\n".format(TimeResolution.seconds(runtime)) print(results) -class TestResult(): +class TestResult: """ Simple data structure to store test result values and print them properly """ def __init__(self, num, name, testdir, status, time, stdout, stderr): self.num = num self.name = name self.testdir = testdir self.status = status self.time = time self.padding = 0 self.stdout = stdout self.stderr = stderr def 
sort_key(self): if self.status == "Passed": return 0, self.name.lower() elif self.status == "Failed": return 2, self.name.lower() elif self.status == "Skipped": return 1, self.name.lower() def __repr__(self): if self.status == "Passed": color = GREEN glyph = TICK elif self.status == "Failed": color = RED glyph = CROSS elif self.status == "Skipped": color = GREY glyph = CIRCLE return color[1] + "{} | {}{} | {} s\n".format( self.name.ljust(self.padding), glyph, self.status.ljust(7), TimeResolution.seconds(self.time)) + color[0] @property def was_successful(self): return self.status != "Failed" def get_all_scripts_from_disk(test_dir, non_scripts): """ Return all available test script from script directory (excluding NON_SCRIPTS) """ python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"]) return list(python_files - set(non_scripts)) def check_script_prefixes(all_scripts): """Check that no more than `EXPECTED_VIOLATION_COUNT` of the test scripts don't start with one of the allowed name prefixes.""" EXPECTED_VIOLATION_COUNT = 16 # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming # convention don't immediately cause the tests to fail. LEEWAY = 0 good_prefixes_re = re.compile( "(abc_)?(example|feature|interface|mempool|mining|p2p|rpc|wallet|tool)_") bad_script_names = [ script for script in all_scripts if good_prefixes_re.match(script) is None] if len(bad_script_names) < EXPECTED_VIOLATION_COUNT: print( "{}HURRAY!{} Number of functional tests violating naming convention reduced!".format( BOLD[1], BOLD[0])) print("Consider reducing EXPECTED_VIOLATION_COUNT from {} to {}".format( EXPECTED_VIOLATION_COUNT, len(bad_script_names))) elif len(bad_script_names) > EXPECTED_VIOLATION_COUNT: print( "INFO: {} tests not meeting naming conventions (expected {}):".format(len(bad_script_names), EXPECTED_VIOLATION_COUNT)) print(" {}".format("\n ".join(sorted(bad_script_names)))) assert len(bad_script_names) <= EXPECTED_VIOLATION_COUNT + \ LEEWAY, "Too many tests not following naming convention! ({} found, expected: <= {})".format( len(bad_script_names), EXPECTED_VIOLATION_COUNT) def get_tests_to_run(test_list, test_params, cutoff, src_timings): """ Returns only test that will not run longer that cutoff. Long running tests are returned first to favor running tests in parallel Timings from build directory override those from src directory """ def get_test_time(test): # Return 0 if test is unknown to always run it return next( (x['time'] for x in src_timings.existing_timings if x['name'] == test), 0) # Some tests must also be run with additional parameters. Add them to the # list. tests_with_params = [] for test_name in test_list: # always execute a test without parameters tests_with_params.append(test_name) params = test_params.get(test_name) if params is not None: tests_with_params.extend( [test_name + " " + " ".join(parameter) for parameter in params]) result = [ test for test in tests_with_params if get_test_time(test) <= cutoff] result.sort(key=lambda x: (-get_test_time(x), x)) return result -class RPCCoverage(): +class RPCCoverage: """ Coverage reporting utilities for test_runner. Coverage calculation works by having each test script subprocess write coverage files into a particular directory. These files contain the RPC commands invoked during testing, as well as a complete listing of RPC commands per `bitcoin-cli help` (`rpc_interface.txt`). 
    After all tests complete, the commands run are combined and diff'd against
    the complete list to calculate uncovered RPC commands.

    See also: test/functional/test_framework/coverage.py
    """

    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir={}'.format(self.dir)

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()

        if uncovered:
            print("Uncovered RPC commands:")
            print("".join(("  - {}\n".format(i)) for i in sorted(uncovered)))
            return False
        else:
            print("All RPC commands covered.")
            return True

    def cleanup(self):
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test_framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'

        coverage_ref_filename = os.path.join(self.dir, reference_filename)
        coverage_filenames = set()
        all_cmds = set()
        # Consider RPC generate covered, because it is overloaded in
        # test_framework/test_node.py and not seen by the coverage check.
        covered_cmds = set({'generate'})

        if not os.path.isfile(coverage_ref_filename):
            raise RuntimeError("No coverage reference found")

        with open(coverage_ref_filename, 'r', encoding="utf8") as file:
            all_cmds.update([line.strip() for line in file.readlines()])

        for root, _, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith(coverage_file_prefix):
                    coverage_filenames.add(os.path.join(root, filename))

        for filename in coverage_filenames:
            with open(filename, 'r', encoding="utf8") as file:
                covered_cmds.update([line.strip() for line in file.readlines()])

        return all_cmds - covered_cmds


def save_results_as_junit(test_results, file_name, time, test_suite_name):
    """
    Save test results to a file in JUnit format

    See http://llg.cubic.org/docs/junit/ for the format specification
    """
    e_test_suite = ET.Element("testsuite",
                              {"name": "{}".format(test_suite_name),
                               "tests": str(len(test_results)),
                               # "errors":
                               "failures": str(len([t for t in test_results if t.status == "Failed"])),
                               "id": "0",
                               "skipped": str(len([t for t in test_results if t.status == "Skipped"])),
                               "time": str(TimeResolution.milliseconds(time)),
                               "timestamp": datetime.datetime.now().isoformat('T')
                               })

    for test_result in test_results:
        e_test_case = ET.SubElement(e_test_suite, "testcase",
                                    {"name": test_result.name,
                                     "classname": test_result.name,
                                     "time": str(TimeResolution.milliseconds(test_result.time))
                                     }
                                    )
        if test_result.status == "Skipped":
            ET.SubElement(e_test_case, "skipped")
        elif test_result.status == "Failed":
            ET.SubElement(e_test_case, "failure")
        # no special element for passed tests

        ET.SubElement(e_test_case, "system-out").text = test_result.stdout
        ET.SubElement(e_test_case, "system-err").text = test_result.stderr

    ET.ElementTree(e_test_suite).write(
        file_name, "UTF-8", xml_declaration=True)


-class Timings():
+class Timings:
    """
    Takes care of loading, merging and saving test execution times.
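A quick usage sketch of save_results_as_junit with the TestResult class defined above: the report ends up as a single testsuite element whose testcase children carry failure or skipped markers plus the captured output. The file name, suite name, test names and timings below are invented for illustration.

# Illustrative sketch only: writing a tiny JUnit report from two fake results,
# using the TestResult and save_results_as_junit definitions above.
results = [
    TestResult(0, "wallet_hd.py", "/tmp/test_runner_0/wallet_hd_0",
               "Passed", 12.3, "ok", ""),
    TestResult(1, "p2p_timeouts.py", "/tmp/test_runner_0/p2p_timeouts_1",
               "Failed", 45.6, "", "AssertionError: ..."),
]
save_results_as_junit(results, "junit_results.xml",
                      sum(r.time for r in results), "functional-tests")
# junit_results.xml now contains one <testsuite> with two <testcase> elements;
# the failed one carries a <failure/> child, and stdout/stderr are stored in
# <system-out>/<system-err>.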
""" def __init__(self, timing_file): self.timing_file = timing_file self.existing_timings = self.load_timings() def load_timings(self): if os.path.isfile(self.timing_file): with open(self.timing_file, encoding="utf8") as file: return json.load(file) else: return [] def get_merged_timings(self, new_timings): """ Return new list containing existing timings updated with new timings Tests that do not exists are not removed """ key = 'name' merged = {} for item in self.existing_timings + new_timings: if item[key] in merged: merged[item[key]].update(item) else: merged[item[key]] = item # Sort the result to preserve test ordering in file merged = list(merged.values()) merged.sort(key=lambda t, key=key: t[key]) return merged def save_timings(self, test_results): # we only save test that have passed - timings for failed test might be # wrong (timeouts or early fails) passed_results = [ test for test in test_results if test.status == 'Passed'] new_timings = list(map(lambda test: {'name': test.name, 'time': TimeResolution.seconds(test.time)}, passed_results)) merged_timings = self.get_merged_timings(new_timings) with open(self.timing_file, 'w', encoding="utf8") as file: json.dump(merged_timings, file, indent=True) class TimeResolution: @staticmethod def seconds(time_fractional_second): return round(time_fractional_second) @staticmethod def milliseconds(time_fractional_second): return round(time_fractional_second, 3) if __name__ == '__main__': main()