diff --git a/test/functional/test_framework/authproxy.py b/test/functional/test_framework/authproxy.py index cc9a024aa..ee7f1e855 100644 --- a/test/functional/test_framework/authproxy.py +++ b/test/functional/test_framework/authproxy.py @@ -1,191 +1,193 @@ # Copyright (c) 2011 Jeff Garzik # # Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: # # Copyright (c) 2007 Jan-Klaas Kollhof # # This file is part of jsonrpc. # # jsonrpc is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 2.1 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this software; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """HTTP proxy for opening RPC connection to bitcoind. AuthServiceProxy has the following improvements over python-jsonrpc's ServiceProxy class: - HTTP connections persist for the life of the AuthServiceProxy object (if server supports HTTP/1.1) - sends protocol 'version', per JSON-RPC 1.1 - sends proper, incrementing 'id' - sends Basic HTTP authentication headers - parses all JSON numbers that look like floats as Decimal - uses standard Python json lib """ import base64 import decimal import http.client import json import logging import socket import time import urllib.parse HTTP_TIMEOUT = 30 USER_AGENT = "AuthServiceProxy/0.1" log = logging.getLogger("BitcoinRPC") class JSONRPCException(Exception): def __init__(self, rpc_error): try: - errmsg = '%(message)s (%(code)i)' % rpc_error + errmsg = '{} ({})'.format(rpc_error['message'], rpc_error['code']) except (KeyError, TypeError): errmsg = '' super().__init__(errmsg) self.error = rpc_error def EncodeDecimal(o): if isinstance(o, decimal.Decimal): return str(o) raise TypeError(repr(o) + " is not JSON serializable") class AuthServiceProxy(): __id_count = 0 # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): self.__service_url = service_url self._service_name = service_name self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests self.__url = urllib.parse.urlparse(service_url) port = 80 if self.__url.port is None else self.__url.port user = None if self.__url.username is None else self.__url.username.encode( 'utf8') passwd = None if self.__url.password is None else self.__url.password.encode( 'utf8') authpair = user + b':' + passwd self.__auth_header = b'Basic ' + base64.b64encode(authpair) if connection: # Callables re-use the connection of the original proxy self.__conn = connection elif self.__url.scheme == 'https': self.__conn = http.client.HTTPSConnection( self.__url.hostname, port, timeout=timeout) else: self.__conn = http.client.HTTPConnection( self.__url.hostname, port, timeout=timeout) def __getattr__(self, name): if name.startswith('__') and name.endswith('__'): # Python internal stuff raise AttributeError if self._service_name is not None: - name = "%s.%s" % (self._service_name, name) + name = "{}.{}".format(self._service_name, name) return AuthServiceProxy(self.__service_url, 
name, connection=self.__conn) def _request(self, method, path, postdata): ''' Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. ''' headers = {'Host': self.__url.hostname, 'User-Agent': USER_AGENT, 'Authorization': self.__auth_header, 'Content-type': 'application/json'} try: self.__conn.request(method, path, postdata, headers) return self._get_response() except http.client.BadStatusLine as e: if e.line == "''": # if connection was closed, try again self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() else: raise except (BrokenPipeError, ConnectionResetError): # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset # ConnectionResetError happens on FreeBSD with Python 3.4 self.__conn.close() self.__conn.request(method, path, postdata, headers) return self._get_response() def get_request(self, *args, **argsn): AuthServiceProxy.__id_count += 1 - log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name, - json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) + log.debug("-{}-> {} {}".format( + AuthServiceProxy.__id_count, self._service_name, json.dumps( + args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) if args and argsn: raise ValueError( 'Cannot handle both named and positional arguments') return {'version': '1.1', 'method': self._service_name, 'params': args or argsn, 'id': AuthServiceProxy.__id_count} def __call__(self, *args, **argsn): postdata = json.dumps(self.get_request( *args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) response = self._request( 'POST', self.__url.path, postdata.encode('utf-8')) if response['error'] is not None: raise JSONRPCException(response['error']) elif 'result' not in response: raise JSONRPCException({ 'code': -343, 'message': 'missing JSON-RPC result'}) else: return response['result'] def batch(self, rpc_call_list): postdata = json.dumps( list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) log.debug("--> " + postdata) return self._request('POST', self.__url.path, postdata.encode('utf-8')) def _get_response(self): req_start_time = time.time() try: http_response = self.__conn.getresponse() except socket.timeout as e: raise JSONRPCException({ 'code': -344, - 'message': '%r RPC took longer than %f seconds. Consider ' + 'message': '{!r} RPC took longer than {} seconds. Consider ' 'using larger timeout for calls that take ' - 'longer to return.' 
% (self._service_name,
-                                                  self.__conn.timeout)})
+                           'longer to return.'.format(self._service_name,
+                                                      self.__conn.timeout)})
         if http_response is None:
             raise JSONRPCException({
                 'code': -342, 'message': 'missing HTTP response from server'})

         content_type = http_response.getheader('Content-Type')
         if content_type != 'application/json':
             raise JSONRPCException({
-                'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
+                'code': -342, 'message': 'non-JSON HTTP response with \'{} {}\' from server'.format(
+                    http_response.status, http_response.reason)})

         responsedata = http_response.read().decode('utf8')
         response = json.loads(responsedata, parse_float=decimal.Decimal)
         elapsed = time.time() - req_start_time
         if "error" in response and response["error"] is None:
-            log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(
+            log.debug("<-{}- [{:.6f}] {}".format(response["id"], elapsed, json.dumps(
                 response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
         else:
-            log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
+            log.debug("<-- [{:.6f}] {}".format(elapsed, responsedata))
         return response

     def __truediv__(self, relative_uri):
         return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri),
                                 self._service_name, connection=self.__conn)
diff --git a/test/functional/test_framework/cdefs.py b/test/functional/test_framework/cdefs.py
index 8d7769eed..eb0d123ee 100644
--- a/test/functional/test_framework/cdefs.py
+++ b/test/functional/test_framework/cdefs.py
@@ -1,99 +1,99 @@
 #!/usr/bin/env python3
 # Copyright (c) 2017 The Bitcoin developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """
 Imports some application default values from source files outside the test
 framework, and defines equivalents of consensus parameters for the test
 framework.
 """
 import os
 import re


 def get_srcdir():
     """
     Try to find out the base folder containing the 'src' folder.
     If SRCDIR is set it does a sanity check and returns that.
     Otherwise it goes on a search and rescue mission.

     Returns None if it cannot find a suitable folder.
     """
     def contains_src(path_to_check):
         if not path_to_check:
             return False
         else:
             cand_path = os.path.join(path_to_check, 'src')
             return os.path.exists(cand_path) and os.path.isdir(cand_path)

     srcdir = os.environ.get('SRCDIR', '')
     if contains_src(srcdir):
         return srcdir

     # Try to work it out based on the main module
     import sys
     mainmod = sys.modules['__main__']
     mainmod_path = getattr(mainmod, '__file__', '')
     if mainmod_path and mainmod_path.endswith('.py'):
         maybe_top = mainmod_path
         while maybe_top != '/':
             maybe_top = os.path.abspath(os.path.dirname(maybe_top))
             if contains_src(maybe_top):
                 return maybe_top

     # No luck, give up.
     return None


 # Slurp in consensus.h contents
 _consensus_h_fh = open(os.path.join(get_srcdir(), 'src', 'consensus',
                                     'consensus.h'), 'rt')
 _consensus_h_contents = _consensus_h_fh.read()
 _consensus_h_fh.close()

 # This constant is currently needed to evaluate the defaults that are
 # expressed as formulas
 ONE_MEGABYTE = 1000000

 # Extract relevant default parameters

 # The maximum allowed block size before the fork
 LEGACY_MAX_BLOCK_SIZE = ONE_MEGABYTE

 # Default setting for maximum allowed size for a block, in bytes
 DEFAULT_MAX_BLOCK_SIZE = eval(
     re.search(r'DEFAULT_MAX_BLOCK_SIZE = (.+);', _consensus_h_contents).group(1))

 # The following consensus parameters should not be automatically imported.
 # They *should* cause test failures if application code is changed in ways
 # that violate current consensus.

 # The maximum allowed number of signature check operations per MB in a block
 # (network rule)
 MAX_BLOCK_SIGOPS_PER_MB = 20000

 # The maximum allowed number of signature check operations per transaction
 # (network rule)
 MAX_TX_SIGOPS_COUNT = 20000

 # The maximum number of sigops we're willing to relay/mine in a single tx
 # (policy.h constant)
 MAX_STANDARD_TX_SIGOPS = MAX_TX_SIGOPS_COUNT // 5

 # Coinbase transaction outputs can only be spent after this number of new
 # blocks (network rule)
 COINBASE_MATURITY = 100

 # Minimum size a transaction can have.
 MIN_TX_SIZE = 100

 # Maximum bytes in a TxOut pubkey script
 MAX_TXOUT_PUBKEY_SCRIPT = 10000

 if __name__ == "__main__":
     # Output values if run standalone to verify
-    print("DEFAULT_MAX_BLOCK_SIZE = %d (bytes)" % DEFAULT_MAX_BLOCK_SIZE)
-    print("MAX_BLOCK_SIGOPS_PER_MB = %d (sigops)" % MAX_BLOCK_SIGOPS_PER_MB)
-    print("MAX_TX_SIGOPS_COUNT = %d (sigops)" % MAX_TX_SIGOPS_COUNT)
-    print("COINBASE_MATURITY = %d (blocks)" % COINBASE_MATURITY)
+    print("DEFAULT_MAX_BLOCK_SIZE = {} (bytes)".format(DEFAULT_MAX_BLOCK_SIZE))
+    print("MAX_BLOCK_SIGOPS_PER_MB = {} (sigops)".format(MAX_BLOCK_SIGOPS_PER_MB))
+    print("MAX_TX_SIGOPS_COUNT = {} (sigops)".format(MAX_TX_SIGOPS_COUNT))
+    print("COINBASE_MATURITY = {} (blocks)".format(COINBASE_MATURITY))
diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py
index 211b71056..a2187191d 100755
--- a/test/functional/test_framework/comptool.py
+++ b/test/functional/test_framework/comptool.py
@@ -1,428 +1,429 @@
 #!/usr/bin/env python3
 # Copyright (c) 2015-2016 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Compare two or more bitcoinds to each other.

 To use, create a class that implements get_tests(), and pass it in
 as the test generator to TestManager. get_tests() should be a Python
 generator that returns TestInstance objects. See below for definition.
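 For example, a minimal test generator might look like the following
 sketch (ExampleTest, some_block and some_tx are hypothetical names, not
 part of the framework):

     class ExampleTest():
         def get_tests(self):
             # Expect the block to be accepted by all nodes...
             yield TestInstance([[some_block, True]])
             # ...and a conflicting tx to be rejected with reject code 16.
             yield TestInstance([[some_tx, RejectResult(16)]])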
TestNode behaves as follows: Configure with a BlockStore and TxStore on_inv: log the message but don't request on_headers: log the chain tip on_pong: update ping response map (for synchronization) on_getheaders: provide headers via BlockStore on_getdata: provide blocks via BlockStore """ from .mininode import * from .blockstore import BlockStore, TxStore from .util import p2p_port, wait_until import logging logger = logging.getLogger("TestFramework.comptool") global mininode_lock class RejectResult(): """Outcome that expects rejection of a transaction or block.""" def __init__(self, code, reason=b''): self.code = code self.reason = reason def match(self, other): if self.code != other.code: return False return other.reason.startswith(self.reason) def __repr__(self): - return '%i:%s' % (self.code, self.reason or '*') + return '{}:{}'.format(self.code, self.reason or '*') class TestNode(P2PInterface): def __init__(self, block_store, tx_store): super().__init__() self.bestblockhash = None self.block_store = block_store self.block_request_map = {} self.tx_store = tx_store self.tx_request_map = {} self.block_reject_map = {} self.tx_reject_map = {} # When the pingmap is non-empty we're waiting for # a response self.pingMap = {} self.lastInv = [] self.closed = False def on_close(self): self.closed = True def on_headers(self, message): if len(message.headers) > 0: best_header = message.headers[-1] best_header.calc_sha256() self.bestblockhash = best_header.sha256 def on_getheaders(self, message): response = self.block_store.headers_for( message.locator, message.hashstop) if response is not None: self.send_message(response) def on_getdata(self, message): [self.send_message(r) for r in self.block_store.get_blocks(message.inv)] [self.send_message(r) for r in self.tx_store.get_transactions(message.inv)] for i in message.inv: if i.type == 1: self.tx_request_map[i.hash] = True elif i.type == 2: self.block_request_map[i.hash] = True def on_inv(self, message): self.lastInv = [x.hash for x in message.inv] def on_pong(self, message): try: del self.pingMap[message.nonce] except KeyError: raise AssertionError( - "Got pong for unknown ping [%s]" % repr(message)) + "Got pong for unknown ping [{}]".format(repr(message))) def on_reject(self, message): if message.message == b'tx': self.tx_reject_map[message.data] = RejectResult( message.code, message.reason) if message.message == b'block': self.block_reject_map[message.data] = RejectResult( message.code, message.reason) def send_inv(self, obj): mtype = 2 if isinstance(obj, CBlock) else 1 self.send_message(msg_inv([CInv(mtype, obj.sha256)])) def send_getheaders(self): # We ask for headers from their last tip. m = msg_getheaders() m.locator = self.block_store.get_locator(self.bestblockhash) self.send_message(m) def send_header(self, header): m = msg_headers() m.headers.append(header) self.send_message(m) # This assumes BIP31 def send_ping(self, nonce): self.pingMap[nonce] = True self.send_message(msg_ping(nonce)) def received_ping_response(self, nonce): return nonce not in self.pingMap def send_mempool(self): self.lastInv = [] self.send_message(msg_mempool()) # TestInstance: # # Instances of these are generated by the test generator, and fed into the # comptool. 
# # "blocks_and_transactions" should be an array of # [obj, True/False/None, hash/None]: # - obj is either a CBlock, CBlockHeader, or a CTransaction, and # - the second value indicates whether the object should be accepted # into the blockchain or mempool (for tests where we expect a certain # answer), or "None" if we don't expect a certain answer and are just # comparing the behavior of the nodes being tested. # - the third value is the hash to test the tip against (if None or omitted, # use the hash of the block) # - NOTE: if a block header, no test is performed; instead the header is # just added to the block_store. This is to facilitate block delivery # when communicating with headers-first clients (when withholding an # intermediate block). # sync_every_block: if True, then each block will be inv'ed, synced, and # nodes will be tested based on the outcome for the block. If False, # then inv's accumulate until all blocks are processed (or max inv size # is reached) and then sent out in one inv message. Then the final block # will be synced across all connections, and the outcome of the final # block will be tested. # sync_every_tx: analogous to behavior for sync_every_block, except if outcome # on the final tx is None, then contents of entire mempool are compared # across all connections. (If outcome of final tx is specified as true # or false, then only the last tx is tested against outcome.) class TestInstance(): def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False): self.blocks_and_transactions = objects if objects else [] self.sync_every_block = sync_every_block self.sync_every_tx = sync_every_tx class TestManager(): def __init__(self, testgen, datadir): self.test_generator = testgen self.p2p_connections = [] self.block_store = BlockStore(datadir) self.tx_store = TxStore(datadir) self.ping_counter = 1 def add_all_connections(self, nodes): for i in range(len(nodes)): # Create a p2p connection to each node node = TestNode(self.block_store, self.tx_store) node.peer_connect('127.0.0.1', p2p_port(i)) self.p2p_connections.append(node) def clear_all_connections(self): self.p2p_connections = [] def wait_for_disconnections(self): def disconnected(): return all(node.closed for node in self.p2p_connections) wait_until(disconnected, timeout=10, lock=mininode_lock) def wait_for_verack(self): return all(node.wait_for_verack() for node in self.p2p_connections) def wait_for_pings(self, counter): def received_pongs(): return all(node.received_ping_response(counter) for node in self.p2p_connections) wait_until(received_pongs, lock=mininode_lock) # sync_blocks: Wait for all connections to request the blockhash given # then send get_headers to find out the tip of each node, and synchronize # the response by using a ping (and waiting for pong with same nonce). 
def sync_blocks(self, blockhash, num_blocks): def blocks_requested(): return all( blockhash in node.block_request_map and node.block_request_map[blockhash] for node in self.p2p_connections ) # --> error if not requested wait_until(blocks_requested, attempts=20 * num_blocks, lock=mininode_lock) # Send getheaders message [c.send_getheaders() for c in self.p2p_connections] # Send ping and wait for response -- synchronization hack [c.send_ping(self.ping_counter) for c in self.p2p_connections] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 # Analogous to sync_block (see above) def sync_transaction(self, txhash, num_events): # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events) def transaction_requested(): return all( txhash in node.tx_request_map and node.tx_request_map[txhash] for node in self.p2p_connections ) # --> error if not requested wait_until(transaction_requested, attempts=20 * num_events, lock=mininode_lock) # Get the mempool [c.send_mempool() for c in self.p2p_connections] # Send ping and wait for response -- synchronization hack [c.send_ping(self.ping_counter) for c in self.p2p_connections] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 # Sort inv responses from each node with mininode_lock: [c.lastInv.sort() for c in self.p2p_connections] # Verify that the tip of each connection all agree with each other, and # with the expected outcome (if given) def check_results(self, blockhash, outcome): with mininode_lock: for c in self.p2p_connections: if outcome is None: if c.bestblockhash != self.p2p_connections[0].bestblockhash: return False # Check that block was rejected w/ code elif isinstance(outcome, RejectResult): if c.bestblockhash == blockhash: return False if blockhash not in c.block_reject_map: logger.error( - 'Block not in reject map: %064x' % (blockhash)) + 'Block not in reject map: {:064x}'.format(blockhash)) return False if not outcome.match(c.block_reject_map[blockhash]): - logger.error('Block rejected with %s instead of expected %s: %064x' % ( + logger.error('Block rejected with {} instead of expected {}: {:064x}'.format( c.block_reject_map[blockhash], outcome, blockhash)) return False elif ((c.bestblockhash == blockhash) != outcome): return False return True # Either check that the mempools all agree with each other, or that # txhash's presence in the mempool matches the outcome specified. # This is somewhat of a strange comparison, in that we're either comparing # a particular tx to an outcome, or the entire mempools altogether; # perhaps it would be useful to add the ability to check explicitly that # a particular tx's existence in the mempool is the same across all nodes. 
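 # For reference, the `outcome` values consumed by check_results and
 # check_mempool form a small contract (a summary of the behavior in this
 # file, not new logic):
 #
 #   True        -> the object must have been accepted
 #   False       -> the object must not have been accepted
 #   None        -> no expectation; nodes are only compared to one another
 #   RejectResult(code, reason)
 #               -> a reject message matching the code (and reason prefix)
 #                  must have been recorded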
def check_mempool(self, txhash, outcome): with mininode_lock: for c in self.p2p_connections: if outcome is None: # Make sure the mempools agree with each other if c.lastInv != self.p2p_connections[0].lastInv: return False # Check that tx was rejected w/ code elif isinstance(outcome, RejectResult): if txhash in c.lastInv: return False if txhash not in c.tx_reject_map: - logger.error('Tx not in reject map: %064x' % (txhash)) + logger.error( + 'Tx not in reject map: {:064x}'.format(txhash)) return False if not outcome.match(c.tx_reject_map[txhash]): - logger.error('Tx rejected with %s instead of expected %s: %064x' % ( + logger.error('Tx rejected with {} instead of expected {}: {:064x}'.format( c.tx_reject_map[txhash], outcome, txhash)) return False elif ((txhash in c.lastInv) != outcome): return False return True def run(self): # Wait until verack is received self.wait_for_verack() test_number = 1 for test_instance in self.test_generator.get_tests(): # We use these variables to keep track of the last block # and last transaction in the tests, which are used # if we're not syncing on every block or every tx. [block, block_outcome, tip] = [None, None, None] [tx, tx_outcome] = [None, None] invqueue = [] for test_obj in test_instance.blocks_and_transactions: b_or_t = test_obj[0] outcome = test_obj[1] # Determine if we're dealing with a block or tx if isinstance(b_or_t, CBlock): # Block test runner block = b_or_t block_outcome = outcome tip = block.sha256 # each test_obj can have an optional third argument # to specify the tip we should compare with # (default is to use the block being tested) if len(test_obj) >= 3: tip = test_obj[2] # Add to shared block_store, set as current block # If there was an open getdata request for the block # previously, and we didn't have an entry in the # block_store, then immediately deliver, because the # node wouldn't send another getdata request while # the earlier one is outstanding. first_block_with_hash = True if self.block_store.get(block.sha256) is not None: first_block_with_hash = False with mininode_lock: self.block_store.add_block(block) for c in self.p2p_connections: if first_block_with_hash and block.sha256 in c.block_request_map and c.block_request_map[block.sha256] == True: # There was a previous request for this block hash # Most likely, we delivered a header for this block # but never had the block to respond to the getdata c.send_message(msg_block(block)) else: c.block_request_map[block.sha256] = False # Either send inv's to each node and sync, or add # to invqueue for later inv'ing. if (test_instance.sync_every_block): # if we expect success, send inv and sync every block # if we expect failure, just push the block and see what happens. 
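                         # (Pushing the raw block is used in the failure
                         # case because the node cannot be relied on to
                         # getdata a block it will reject; the ping barrier
                         # below still guarantees the block message is
                         # processed before the tip is checked.)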
if outcome == True: [c.send_inv(block) for c in self.p2p_connections] self.sync_blocks(block.sha256, 1) else: [c.send_message(msg_block(block)) for c in self.p2p_connections] [c.send_ping(self.ping_counter) for c in self.p2p_connections] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 if (not self.check_results(tip, outcome)): raise AssertionError( - "Test failed at test %d" % test_number) + "Test failed at test {}".format(test_number)) else: invqueue.append(CInv(2, block.sha256)) elif isinstance(b_or_t, CBlockHeader): block_header = b_or_t self.block_store.add_header(block_header) [c.send_header(block_header) for c in self.p2p_connections] else: # Tx test runner assert(isinstance(b_or_t, CTransaction)) tx = b_or_t tx_outcome = outcome # Add to shared tx store and clear map entry with mininode_lock: self.tx_store.add_transaction(tx) for c in self.p2p_connections: c.tx_request_map[tx.sha256] = False # Again, either inv to all nodes or save for later if (test_instance.sync_every_tx): [c.send_inv(tx) for c in self.p2p_connections] self.sync_transaction(tx.sha256, 1) if (not self.check_mempool(tx.sha256, outcome)): raise AssertionError( - "Test failed at test %d" % test_number) + "Test failed at test {}".format(test_number)) else: invqueue.append(CInv(1, tx.sha256)) # Ensure we're not overflowing the inv queue if len(invqueue) == MAX_INV_SZ: [c.send_message(msg_inv(invqueue)) for c in self.p2p_connections] invqueue = [] # Do final sync if we weren't syncing on every block or every tx. if (not test_instance.sync_every_block and block is not None): if len(invqueue) > 0: [c.send_message(msg_inv(invqueue)) for c in self.p2p_connections] invqueue = [] self.sync_blocks(block.sha256, len( test_instance.blocks_and_transactions)) if (not self.check_results(tip, block_outcome)): raise AssertionError( - "Block test failed at test %d" % test_number) + "Block test failed at test {}".format(test_number)) if (not test_instance.sync_every_tx and tx is not None): if len(invqueue) > 0: [c.send_message(msg_inv(invqueue)) for c in self.p2p_connections] invqueue = [] self.sync_transaction(tx.sha256, len( test_instance.blocks_and_transactions)) if (not self.check_mempool(tx.sha256, tx_outcome)): raise AssertionError( - "Mempool test failed at test %d" % test_number) + "Mempool test failed at test {}".format(test_number)) - logger.info("Test %d: PASS" % test_number) + logger.info("Test {}: PASS".format(test_number)) test_number += 1 [c.disconnect_node() for c in self.p2p_connections] self.wait_for_disconnections() self.block_store.close() self.tx_store.close() diff --git a/test/functional/test_framework/coverage.py b/test/functional/test_framework/coverage.py index 7b84ff59b..82d995287 100644 --- a/test/functional/test_framework/coverage.py +++ b/test/functional/test_framework/coverage.py @@ -1,111 +1,111 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for doing coverage analysis on the RPC interface. Provides a way to track which RPC commands are exercised during testing. """ import os REFERENCE_FILENAME = 'rpc_interface.txt' class AuthServiceProxyWrapper(): """ An object that wraps AuthServiceProxy to record specific RPC calls. """ def __init__(self, auth_service_proxy_instance, coverage_logfile=None): """ Kwargs: auth_service_proxy_instance (AuthServiceProxy): the instance being wrapped. 
coverage_logfile (str): if specified, write each service_name out to a file when called. """ self.auth_service_proxy_instance = auth_service_proxy_instance self.coverage_logfile = coverage_logfile def __getattr__(self, name): return_val = getattr(self.auth_service_proxy_instance, name) if not isinstance(return_val, type(self.auth_service_proxy_instance)): # If proxy getattr returned an unwrapped value, do the same here. return return_val return AuthServiceProxyWrapper(return_val, self.coverage_logfile) def __call__(self, *args, **kwargs): """ Delegates to AuthServiceProxy, then writes the particular RPC method called to a file. """ return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs) self._log_call() return return_val def _log_call(self): rpc_method = self.auth_service_proxy_instance._service_name if self.coverage_logfile: with open(self.coverage_logfile, 'a+', encoding='utf8') as f: - f.write("%s\n" % rpc_method) + f.write("{}\n".format(rpc_method)) def __truediv__(self, relative_uri): return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri, self.coverage_logfile) def get_request(self, *args, **kwargs): self._log_call() return self.auth_service_proxy_instance.get_request(*args, **kwargs) def get_filename(dirname, n_node): """ Get a filename unique to the test process ID and node. This file will contain a list of RPC commands covered. """ pid = str(os.getpid()) return os.path.join( - dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node))) + dirname, "coverage.pid{}.node{}.txt".format(pid, str(n_node))) def write_all_rpc_commands(dirname, node): """ Write out a list of all RPC functions available in `bitcoin-cli` for coverage comparison. This will only happen once per coverage directory. Args: dirname (str): temporary test dir node (AuthServiceProxy): client Returns: bool. if the RPC interface file was written. """ filename = os.path.join(dirname, REFERENCE_FILENAME) if os.path.isfile(filename): return False help_output = node.help().split('\n') commands = set() for line in help_output: line = line.strip() # Ignore blanks and headers if line and not line.startswith('='): - commands.add("%s\n" % line.split()[0]) + commands.add("{}\n".format(line.split()[0])) with open(filename, 'w', encoding='utf8') as f: f.writelines(list(commands)) return True diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index 05a78c244..13eff0158 100755 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -1,248 +1,248 @@ # Copyright (c) 2011 Sam Rushing """ECC secp256k1 OpenSSL wrapper. WARNING: This module does not mlock() secrets; your private keys may end up on disk in swap! Use with caution! This file is modified from python-bitcoinlib. 
""" import ctypes import ctypes.util import hashlib import sys ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library('ssl') or 'libeay32') ssl.BN_new.restype = ctypes.c_void_p ssl.BN_new.argtypes = [] ssl.BN_free.restype = None ssl.BN_free.argtypes = [ctypes.c_void_p] ssl.BN_bin2bn.restype = ctypes.c_void_p ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p] ssl.BN_CTX_free.restype = None ssl.BN_CTX_free.argtypes = [ctypes.c_void_p] ssl.BN_CTX_new.restype = ctypes.c_void_p ssl.BN_CTX_new.argtypes = [] ssl.ECDH_compute_key.restype = ctypes.c_int ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p] ssl.ECDSA_sign.restype = ctypes.c_int ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] ssl.ECDSA_verify.restype = ctypes.c_int ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p] ssl.EC_KEY_free.restype = None ssl.EC_KEY_free.argtypes = [ctypes.c_void_p] ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int] ssl.EC_KEY_get0_group.restype = ctypes.c_void_p ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p] ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p] ssl.EC_KEY_set_private_key.restype = ctypes.c_int ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p] ssl.EC_KEY_set_conv_form.restype = None ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int] ssl.EC_KEY_set_public_key.restype = ctypes.c_int ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p] ssl.i2o_ECPublicKey.restype = ctypes.c_void_p ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p] ssl.EC_POINT_new.restype = ctypes.c_void_p ssl.EC_POINT_new.argtypes = [ctypes.c_void_p] ssl.EC_POINT_free.restype = None ssl.EC_POINT_free.argtypes = [ctypes.c_void_p] ssl.EC_POINT_mul.restype = ctypes.c_int ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] # this specifies the curve used with ECDSA. NID_secp256k1 = 714 # from openssl/obj_mac.h SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141 SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2 # Thx to Sam Devlin for the ctypes magic 64-bit fix. 
def _check_result(val, func, args): if val == 0: raise ValueError else: return ctypes.c_void_p(val) ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p ssl.EC_KEY_new_by_curve_name.errcheck = _check_result class CECKey(): """Wrapper around OpenSSL's EC_KEY""" POINT_CONVERSION_COMPRESSED = 2 POINT_CONVERSION_UNCOMPRESSED = 4 def __init__(self): self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1) def __del__(self): if ssl: ssl.EC_KEY_free(self.k) self.k = None def set_secretbytes(self, secret): priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new()) group = ssl.EC_KEY_get0_group(self.k) pub_key = ssl.EC_POINT_new(group) ctx = ssl.BN_CTX_new() if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx): raise ValueError( "Could not derive public key from the supplied secret.") ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx) ssl.EC_KEY_set_private_key(self.k, priv_key) ssl.EC_KEY_set_public_key(self.k, pub_key) ssl.BN_free(priv_key) ssl.EC_POINT_free(pub_key) ssl.BN_CTX_free(ctx) return self.k def set_privkey(self, key): self.mb = ctypes.create_string_buffer(key) return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key)) def set_pubkey(self, key): self.mb = ctypes.create_string_buffer(key) return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key)) def get_privkey(self): size = ssl.i2d_ECPrivateKey(self.k, 0) mb_pri = ctypes.create_string_buffer(size) ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri))) return mb_pri.raw def get_pubkey(self): size = ssl.i2o_ECPublicKey(self.k, 0) mb = ctypes.create_string_buffer(size) ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb))) return mb.raw def get_raw_ecdh_key(self, other_pubkey): ecdh_keybuffer = ctypes.create_string_buffer(32) r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32, ssl.EC_KEY_get0_public_key(other_pubkey.k), self.k, 0) if r != 32: raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed') return ecdh_keybuffer.raw def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()): # FIXME: be warned it's not clear what the kdf should be as a default r = self.get_raw_ecdh_key(other_pubkey) return kdf(r) def sign(self, hash, low_s=True): # FIXME: need unit tests for below cases if not isinstance(hash, bytes): - raise TypeError('Hash must be bytes instance; got %r' % - hash.__class__) + raise TypeError( + 'Hash must be bytes instance; got {!r}'.format(hash.__class__)) if len(hash) != 32: raise ValueError('Hash must be exactly 32 bytes long') sig_size0 = ctypes.c_uint32() sig_size0.value = ssl.ECDSA_size(self.k) mb_sig = ctypes.create_string_buffer(sig_size0.value) result = ssl.ECDSA_sign(0, hash, len( hash), mb_sig, ctypes.byref(sig_size0), self.k) assert 1 == result assert mb_sig.raw[0] == 0x30 assert mb_sig.raw[1] == sig_size0.value - 2 total_size = mb_sig.raw[1] assert mb_sig.raw[2] == 2 r_size = mb_sig.raw[3] assert mb_sig.raw[4 + r_size] == 2 s_size = mb_sig.raw[5 + r_size] s_value = int.from_bytes( mb_sig.raw[6 + r_size:6 + r_size + s_size], byteorder='big') if (not low_s) or s_value <= SECP256K1_ORDER_HALF: return mb_sig.raw[:sig_size0.value] else: low_s_value = SECP256K1_ORDER - s_value low_s_bytes = (low_s_value).to_bytes(33, byteorder='big') while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80: low_s_bytes = low_s_bytes[1:] new_s_size = len(low_s_bytes) new_total_size_byte = ( total_size + new_s_size - s_size).to_bytes(1, byteorder='big') new_s_size_byte = 
(new_s_size).to_bytes(1, byteorder='big') return b'\x30' + new_total_size_byte + mb_sig.raw[2:5 + r_size] + new_s_size_byte + low_s_bytes def verify(self, hash, sig): """Verify a DER signature""" return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1 def set_compressed(self, compressed): if compressed: form = self.POINT_CONVERSION_COMPRESSED else: form = self.POINT_CONVERSION_UNCOMPRESSED ssl.EC_KEY_set_conv_form(self.k, form) class CPubKey(bytes): """An encapsulated public key Attributes: is_valid - Corresponds to CPubKey.IsValid() is_fullyvalid - Corresponds to CPubKey.IsFullyValid() is_compressed - Corresponds to CPubKey.IsCompressed() """ def __new__(cls, buf, _cec_key=None): self = super(CPubKey, cls).__new__(cls, buf) if _cec_key is None: _cec_key = CECKey() self._cec_key = _cec_key self.is_fullyvalid = _cec_key.set_pubkey(self) != 0 return self @property def is_valid(self): return len(self) > 0 @property def is_compressed(self): return len(self) == 33 def verify(self, hash, sig): return self._cec_key.verify(hash, sig) def __str__(self): return repr(self) def __repr__(self): # Always have represent as b'' so test cases don't have to # change for py2/3 if sys.version > '3': - return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__()) + return '{}({})'.format(self.__class__.__name__, super(CPubKey, self).__repr__()) else: - return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__()) + return '{}(b{})'.format(self.__class__.__name__, super(CPubKey, self).__repr__()) diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 22b1472e8..fe2e1979e 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -1,1207 +1,1218 @@ #!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Bitcoin test framework primitive and message strcutures CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: data structures that should map to corresponding structures in bitcoin/primitives msg_block, msg_tx, msg_headers, etc.: data structures that represent network messages ser_*, deser_*: functions that handle serialization/deserialization.""" from codecs import encode import copy import hashlib from io import BytesIO import random import socket import struct import time from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes, bytes_to_hex_str MIN_VERSION_SUPPORTED = 60001 # past bip-31 for ping/pong MY_VERSION = 70014 MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" # from version 70001 onwards, fRelay should be appended to version messages (BIP37) MY_RELAY = 1 MAX_INV_SZ = 50000 MAX_BLOCK_BASE_SIZE = 1000000 # 1 BCH in satoshis COIN = 100000000 NODE_NETWORK = (1 << 0) # NODE_GETUTXO = (1 << 1) NODE_BLOOM = (1 << 2) # NODE_WITNESS = (1 << 3) NODE_XTHIN = (1 << 4) NODE_BITCOIN_CASH = (1 << 5) NODE_NETWORK_LIMITED = (1 << 10) # Howmuch data will be read from the network at once READ_BUFFER_SIZE = 8192 # Serialization/deserialization tools def sha256(s): return hashlib.new('sha256', s).digest() def ripemd160(s): return hashlib.new('ripemd160', s).digest() def hash256(s): return sha256(sha256(s)) def ser_compact_size(l): r = b"" if l < 253: r = struct.pack("B", l) elif l < 0x10000: r = struct.pack(">= 32 return rs def uint256_from_str(s): r = 0 t = struct.unpack("> 24) & 0xFF v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) return v def deser_vector(f, c): nit = deser_compact_size(f) r = [] for i in range(nit): t = c() t.deserialize(f) r.append(t) return r # ser_function_name: Allow for an alternate serialization function on the # entries in the vector. 
def ser_vector(l, ser_function_name=None): r = ser_compact_size(len(l)) for i in l: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(l): r = ser_compact_size(len(l)) for i in l: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_string(f) r.append(t) return r def ser_string_vector(l): r = ser_compact_size(len(l)) for sv in l: r += ser_string(sv) return r def deser_int_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = struct.unpack("H", f.read(2))[0] def serialize(self, with_time=True): r = b"" if with_time: r += struct.pack("H", self.port) return r def __repr__(self): - return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices, - self.ip, self.port) + return "CAddress(nServices={} ip={} port={})".format( + self.nServices, self.ip, self.port) class CInv(): typemap = { 0: "Error", 1: "TX", 2: "Block", 4: "CompactBlock" } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack(" 21000000 * COIN: return False return True def __repr__(self): - return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \ - % (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) + return "CTransaction(nVersion={} vin={} vout={} nLockTime={})".format( + self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) class CBlockHeader(): def __init__(self, header=None): if header is None: self.set_null() else: self.nVersion = header.nVersion self.hashPrevBlock = header.hashPrevBlock self.hashMerkleRoot = header.hashMerkleRoot self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce self.sha256 = header.sha256 self.hash = header.hash self.calc_sha256() def set_null(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack(" 1: newhashes = [] for i in range(0, len(hashes), 2): i2 = min(i + 1, len(hashes) - 1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = newhashes return uint256_from_str(hashes[0]) def calc_merkle_root(self): hashes = [] for tx in self.vtx: tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) return self.get_merkle_root(hashes) def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.sha256 > target: return False for tx in self.vtx: if not tx.is_valid(): return False if self.calc_merkle_root() != self.hashMerkleRoot: return False return True def solve(self): self.rehash() target = uint256_from_compact(self.nBits) while self.sha256 > target: self.nNonce += 1 self.rehash() def __repr__(self): - return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \ - % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, - time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) + return "CBlock(nVersion={} hashPrevBlock={:064x} hashMerkleRoot={:064x} nTime={} nBits={:08x} nNonce={:08x} vtx={})".format( + self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, + time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) class PrefilledTransaction(): def __init__(self, index=0, tx=None): self.index = index self.tx = tx def deserialize(self, f): self.index = 
deser_compact_size(f) self.tx = CTransaction() self.tx.deserialize(f) def serialize(self): r = b"" r += ser_compact_size(self.index) r += self.tx.serialize() return r def __repr__(self): - return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx)) + return "PrefilledTransaction(index={}, tx={})".format( + self.index, repr(self.tx)) # This is what we send on the wire, in a cmpctblock message. class P2PHeaderAndShortIDs(): def __init__(self): self.header = CBlockHeader() self.nonce = 0 self.shortids_length = 0 self.shortids = [] self.prefilled_txn_length = 0 self.prefilled_txn = [] def deserialize(self, f): self.header.deserialize(f) self.nonce = struct.unpack("= 106: self.addrFrom = CAddress() self.addrFrom.deserialize(f, False) self.nNonce = struct.unpack("= 209: self.nStartingHeight = struct.unpack("= 70001: # Relay field is optional for version 70001 onwards try: self.nRelay = struct.unpack(" class msg_headers(): command = b"headers" def __init__(self, headers=None): self.headers = headers if headers is not None else [] def deserialize(self, f): # comment in bitcoind indicates these should be deserialized as blocks blocks = deser_vector(f, CBlock) for x in blocks: self.headers.append(CBlockHeader(x)) def serialize(self): blocks = [CBlock(x) for x in self.headers] return ser_vector(blocks) def __repr__(self): - return "msg_headers(headers=%s)" % repr(self.headers) + return "msg_headers(headers={})".format(repr(self.headers)) class msg_reject(): command = b"reject" REJECT_MALFORMED = 1 def __init__(self): self.message = b"" self.code = 0 self.reason = b"" self.data = 0 def deserialize(self, f): self.message = deser_string(f) self.code = struct.unpack(" 0: self.recvbuf += t while True: msg = self._on_data() if msg == None: break self.on_message(msg) def _on_data(self): """Try to read P2P messages from the recv buffer. This method reads data from the buffer in a loop. It deserializes, parses and verifies the P2P header, then passes the P2P payload to the on_message callback for processing.""" try: with mininode_lock: if len(self.recvbuf) < 4: return None if self.recvbuf[:4] != MAGIC_BYTES[self.network]: - raise ValueError("got garbage %s" % repr(self.recvbuf)) + raise ValueError( + "got garbage {}".format(repr(self.recvbuf))) if len(self.recvbuf) < 4 + 12 + 4 + 4: return command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] msglen = struct.unpack(" 0 or pre_connection) def handle_write(self): """asyncore callback when data should be written to the socket.""" with mininode_lock: # asyncore does not expose socket connection, only the first read/write # event, thus we must check connection manually here to know when we # actually connect if self.state == "connecting": self.handle_connect() if not self.writable(): return try: sent = self.send(self.sendbuf) except: self.handle_close() return self.sendbuf = self.sendbuf[sent:] def format_message(self, message): command = message.command data = message.serialize() tmsg = MAGIC_BYTES[self.network] tmsg += command tmsg += b"\x00" * (12 - len(command)) tmsg += struct.pack(" 500: log_message += "... (msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): """A high-level P2P interface class for communicating with a Bitcoin Cash node. This class provides high-level callbacks for processing P2P message payloads, as well as convenience methods for interacting with the node over P2P. 
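 For instance, a testcase that only wants to record inv announcements
 instead of requesting the announced objects could use a subclass along
 these lines (an illustrative sketch; InvLogger is not part of the
 framework):

     class InvLogger(P2PInterface):
         def __init__(self):
             super().__init__()
             self.seen_hashes = []

         def on_inv(self, message):
             # Override: log announced hashes, send no getdata.
             self.seen_hashes.extend(i.hash for i in message.inv)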
Individual testcases should subclass this and override the on_* methods if they want to alter message handling behaviour.""" def __init__(self): super().__init__() # Track number of messages of each type received and the most recent # message of each type self.message_count = defaultdict(int) self.last_message = {} # A count of the number of ping messages we've sent to the node self.ping_counter = 1 # The network services received from the peer self.nServices = 0 def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs): super().peer_connect(*args, **kwargs) if send_version: # Send a version msg vt = msg_version() vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 self.send_message(vt, True) # Message receiving methods def on_message(self, message): """Receive message and dispatch message to appropriate callback. We keep a count of how many of each message type has been received and the most recent message of each type.""" with mininode_lock: try: command = message.command.decode('ascii') self.message_count[command] += 1 self.last_message[command] = message getattr(self, 'on_' + command)(message) except: - print("ERROR delivering %s (%s)" % (repr(message), - sys.exc_info()[0])) + print("ERROR delivering {} ({})".format( + repr(message), sys.exc_info()[0])) raise # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour. def on_open(self): pass def on_close(self): pass def on_addr(self, message): pass def on_block(self, message): pass def on_blocktxn(self, message): pass def on_cmpctblock(self, message): pass def on_feefilter(self, message): pass def on_getaddr(self, message): pass def on_getblocks(self, message): pass def on_getblocktxn(self, message): pass def on_getdata(self, message): pass def on_getheaders(self, message): pass def on_headers(self, message): pass def on_mempool(self, message): pass def on_pong(self, message): pass def on_reject(self, message): pass def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass def on_inv(self, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): self.send_message(want) def on_ping(self, message): self.send_message(msg_pong(message.nonce)) def on_verack(self, message): self.verack_received = True def on_version(self, message): assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. 
Test framework only supports versions greater than {}".format( message.nVersion, MIN_VERSION_SUPPORTED) self.send_message(msg_verack()) self.nServices = message.nServices # Connection helper methods def wait_for_disconnect(self, timeout=60): def test_function(): return self.state != "connected" wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message receiving helper methods def wait_for_block(self, blockhash, timeout=60): def test_function(): return self.last_message.get( "block") and self.last_message["block"].block.rehash() == blockhash wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getdata(self, timeout=60): def test_function(): return self.last_message.get("getdata") wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getheaders(self, timeout=60): def test_function(): return self.last_message.get("getheaders") wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_inv(self, expected_inv, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError( "wait_for_inv() will only verify the first inv object") def test_function(): return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_verack(self, timeout=60): def test_function(): return self.message_count["verack"] wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message sending helper functions def send_and_ping(self, message): self.send_message(message) self.sync_with_ping() # Sync up with the node def sync_with_ping(self, timeout=60): self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): if not self.last_message.get("pong"): return False return self.last_message["pong"].nonce == self.ping_counter wait_until(test_function, timeout=timeout, lock=mininode_lock) self.ping_counter += 1 # Keep our own socket map for asyncore, so that we can track disconnects # ourselves (to workaround an issue with closing an asyncore socket when # using select) mininode_socket_map = dict() # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, # P2PConnection acquires this lock whenever delivering a message to a P2PInterface, # and whenever adding anything to the send buffer (in send_message()). This # lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. 
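 # Typical usage from the test thread, and the same pattern the wait_until
 # helper follows when given lock=mininode_lock:
 #
 #   with mininode_lock:
 #       inv_count = peer.message_count["inv"]  # consistent snapshot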
mininode_lock = threading.RLock() class NetworkThread(threading.Thread): def __init__(self): super().__init__(name="NetworkThread") def run(self): while mininode_socket_map: # We check for whether to disconnect outside of the asyncore # loop to workaround the behavior of asyncore when using # select disconnected = [] for fd, obj in mininode_socket_map.items(): if obj.disconnect: disconnected.append(obj) [obj.handle_close() for obj in disconnected] asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) logger.debug("Network thread closing") def network_thread_start(): """Start the network thread.""" # Only one network thread may run at a time assert not network_thread_running() NetworkThread().start() def network_thread_running(): """Return whether the network thread is running.""" return any([thread.name == "NetworkThread" for thread in threading.enumerate()]) def network_thread_join(timeout=10): """Wait timeout seconds for the network thread to terminate. Throw if the network thread doesn't terminate in timeout seconds.""" network_threads = [ thread for thread in threading.enumerate() if thread.name == "NetworkThread"] assert len(network_threads) <= 1 for thread in network_threads: thread.join(timeout) assert not thread.is_alive() diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py index 5f898ec96..6058b1600 100644 --- a/test/functional/test_framework/netutil.py +++ b/test/functional/test_framework/netutil.py @@ -1,168 +1,168 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Linux network utilities. Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal """ import sys import socket import fcntl import struct import array import os from binascii import unhexlify, hexlify # STATE_ESTABLISHED = '01' # STATE_SYN_SENT = '02' # STATE_SYN_RECV = '03' # STATE_FIN_WAIT1 = '04' # STATE_FIN_WAIT2 = '05' # STATE_TIME_WAIT = '06' # STATE_CLOSE = '07' # STATE_CLOSE_WAIT = '08' # STATE_LAST_ACK = '09' STATE_LISTEN = '0A' # STATE_CLOSING = '0B' def get_socket_inodes(pid): ''' Get list of socket inodes for process pid. ''' - base = '/proc/%i/fd' % pid + base = '/proc/{}/fd'.format(pid) inodes = [] for item in os.listdir(base): target = os.readlink(os.path.join(base, item)) if target.startswith('socket:'): inodes.append(int(target[8:-1])) return inodes def _remove_empty(array): return [x for x in array if x != ''] def _convert_ip_port(array): host, port = array.split(':') # convert host from mangled-per-four-bytes form as used by kernel host = unhexlify(host) host_out = '' for x in range(0, len(host) // 4): (val,) = struct.unpack('=I', host[x * 4:(x + 1) * 4]) - host_out += '%08x' % val + host_out += '{:08x}'.format(val) return host_out, int(port, 16) def netstat(typ='tcp'): ''' Function to return a list with status of tcp connections at linux systems To get pid of all network process running on system, you must run this script as superuser ''' with open('/proc/net/' + typ, 'r', encoding='utf8') as f: content = f.readlines() content.pop(0) result = [] for line in content: # Split lines and remove empty spaces. line_array = _remove_empty(line.split(' ')) tcp_id = line_array[0] l_addr = _convert_ip_port(line_array[1]) r_addr = _convert_ip_port(line_array[2]) state = line_array[3] # Need the inode to match with process pid. 
inode = int(line_array[9]) nline = [tcp_id, l_addr, r_addr, state, inode] result.append(nline) return result def get_bind_addrs(pid): ''' Get bind addresses as (host,port) tuples for process pid. ''' inodes = get_socket_inodes(pid) bind_addrs = [] for conn in netstat('tcp') + netstat('tcp6'): if conn[3] == STATE_LISTEN and conn[4] in inodes: bind_addrs.append(conn[1]) return bind_addrs # from: http://code.activestate.com/recipes/439093/ def all_interfaces(): ''' Return all interfaces that are up ''' is_64bits = sys.maxsize > 2**32 struct_size = 40 if is_64bits else 32 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) max_possible = 8 # initial value while True: bytes = max_possible * struct_size names = array.array('B', b'\0' * bytes) outbytes = struct.unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, # SIOCGIFCONF struct.pack('iL', bytes, names.buffer_info()[0]) ))[0] if outbytes == bytes: max_possible *= 2 else: break namestr = names.tobytes() return [(namestr[i:i + 16].split(b'\0', 1)[0], socket.inet_ntoa(namestr[i + 20:i + 24])) for i in range(0, outbytes, struct_size)] def addr_to_hex(addr): ''' Convert string IPv4 or IPv6 address to binary address as returned by get_bind_addrs. Very naive implementation that certainly doesn't work for all IPv6 variants. ''' if '.' in addr: # IPv4 addr = [int(x) for x in addr.split('.')] elif ':' in addr: # IPv6 sub = [[], []] # prefix, suffix x = 0 addr = addr.split(':') for i, comp in enumerate(addr): if comp == '': # skip empty component at beginning or end if i == 0 or i == (len(addr) - 1): continue x += 1 # :: skips to suffix assert(x < 2) else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) sub[x].append(val & 0xff) nullbytes = 16 - len(sub[0]) - len(sub[1]) assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) addr = sub[0] + ([0] * nullbytes) + sub[1] else: - raise ValueError('Could not parse address %s' % addr) + raise ValueError('Could not parse address {}'.format(addr)) return hexlify(bytearray(addr)).decode('ascii') def test_ipv6_local(): ''' Check for (local) IPv6 support. ''' import socket # By using SOCK_DGRAM this will not actually make a connection, but it will # fail if there is no route to IPv6 localhost. have_ipv6 = True try: s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) s.connect(('::1', 0)) except socket.error: have_ipv6 = False return have_ipv6 diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index b7ae80933..434028625 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -1,725 +1,725 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Functionality to build scripts, as well as SignatureHash(). This file is modified from python-bitcoinlib. 
""" from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string from .bignum import bn2vch from binascii import hexlify import hashlib import struct import sys bchr = chr bord = ord if sys.version > '3': long = int def bchr(x): return bytes([x]) def bord(x): return x MAX_SCRIPT_ELEMENT_SIZE = 520 OPCODE_NAMES = {} def hash160(s): return hashlib.new('ripemd160', sha256(s)).digest() _opcode_instances = [] class CScriptOp(int): """A single script opcode""" __slots__ = [] @staticmethod def encode_op_pushdata(d): """Encode a PUSHDATA op, returning bytes""" if len(d) < 0x4c: return b'' + bchr(len(d)) + d # OP_PUSHDATA elif len(d) <= 0xff: return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1 elif len(d) <= 0xffff: return b'\x4d' + struct.pack(b'>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return bytes(bchr(len(r)) + r) class CScript(bytes): """Serialized script A bytes subclass, so you can use this directly whenever bytes are accepted. Note that this means that indexing does *not* work - you'll get an index by byte rather than opcode. This format was chosen for efficiency so that the general case would not require creating a lot of little CScriptOP objects. iter(script) however does iterate by opcode. """ @classmethod def __coerce_instance(cls, other): # Coerce other into bytes if isinstance(other, CScriptOp): other = bchr(other) elif isinstance(other, CScriptNum): if (other.value == 0): other = bchr(CScriptOp(OP_0)) else: other = CScriptNum.encode(other) elif isinstance(other, int): if 0 <= other <= 16: other = bytes(bchr(CScriptOp.encode_op_n(other))) elif other == -1: other = bytes(bchr(OP_1NEGATE)) else: other = CScriptOp.encode_op_pushdata(bn2vch(other)) elif isinstance(other, (bytes, bytearray)): other = CScriptOp.encode_op_pushdata(other) return other def __add__(self, other): # Do the coercion outside of the try block so that errors in it are # noticed. other = self.__coerce_instance(other) try: # bytes.__add__ always returns bytes instances unfortunately return CScript(super(CScript, self).__add__(other)) except TypeError: raise TypeError( - 'Can not add a %r instance to a CScript' % other.__class__) + 'Can not add a {!r} instance to a CScript'.format(other.__class__)) def join(self, iterable): # join makes no sense for a CScript() raise NotImplementedError def __new__(cls, value=b''): if isinstance(value, bytes) or isinstance(value, bytearray): return super(CScript, cls).__new__(cls, value) else: def coerce_iterable(iterable): for instance in iterable: yield cls.__coerce_instance(instance) # Annoyingly on both python2 and python3 bytes.join() always # returns a bytes instance even when subclassed. return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value))) def raw_iter(self): """Raw iteration Yields tuples of (opcode, data, sop_idx) so that the different possible PUSHDATA encodings can be accurately distinguished, as well as determining the exact opcode byte indexes. 
(sop_idx) """ i = 0 while i < len(self): sop_idx = i opcode = bord(self[i]) i += 1 if opcode > OP_PUSHDATA4: yield (opcode, None, sop_idx) else: datasize = None pushdata_type = None if opcode < OP_PUSHDATA1: - pushdata_type = 'PUSHDATA(%d)' % opcode + pushdata_type = 'PUSHDATA({})'.format(opcode) datasize = opcode elif opcode == OP_PUSHDATA1: pushdata_type = 'PUSHDATA1' if i >= len(self): raise CScriptInvalidError( 'PUSHDATA1: missing data length') datasize = bord(self[i]) i += 1 elif opcode == OP_PUSHDATA2: pushdata_type = 'PUSHDATA2' if i + 1 >= len(self): raise CScriptInvalidError( 'PUSHDATA2: missing data length') datasize = bord(self[i]) + (bord(self[i + 1]) << 8) i += 2 elif opcode == OP_PUSHDATA4: pushdata_type = 'PUSHDATA4' if i + 3 >= len(self): raise CScriptInvalidError( 'PUSHDATA4: missing data length') datasize = bord(self[i]) + (bord(self[i + 1]) << 8) + \ (bord(self[i + 2]) << 16) + (bord(self[i + 3]) << 24) i += 4 else: assert False # shouldn't happen data = bytes(self[i:i + datasize]) # Check for truncation if len(data) < datasize: raise CScriptTruncatedPushDataError( - '%s: truncated data' % pushdata_type, data) + '{}: truncated data'.format(pushdata_type, data)) i += datasize yield (opcode, data, sop_idx) def __iter__(self): """'Cooked' iteration Returns either a CScriptOP instance, an integer, or bytes, as appropriate. See raw_iter() if you need to distinguish the different possible PUSHDATA encodings. """ for (opcode, data, sop_idx) in self.raw_iter(): if data is not None: yield data else: opcode = CScriptOp(opcode) if opcode.is_small_int(): yield opcode.decode_op_n() else: yield CScriptOp(opcode) def __repr__(self): # For Python3 compatibility add b before strings so testcases don't # need to change def _repr(o): if isinstance(o, bytes): - return b"x('%s')" % hexlify(o).decode('ascii') + return "x('{}')".format(hexlify(o).decode('ascii')).encode() else: return repr(o) ops = [] i = iter(self) while True: op = None try: op = _repr(next(i)) except CScriptTruncatedPushDataError as err: - op = '%s...' % (_repr(err.data), err) + op = '{}...'.format(_repr(err.data), err) break except CScriptInvalidError as err: - op = '' % err + op = ''.format(err) break except StopIteration: break finally: if op is not None: ops.append(op) - return "CScript([%s])" % ', '.join(ops) + return "CScript([{}])".format(', '.join(ops)) def GetSigOpCount(self, fAccurate): """Get the SigOp count. fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details. Note that this is consensus-critical. """ n = 0 lastOpcode = OP_INVALIDOPCODE for (opcode, data, sop_idx) in self.raw_iter(): if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY): n += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): if fAccurate and (OP_1 <= lastOpcode <= OP_16): n += opcode.decode_op_n() else: n += 20 lastOpcode = opcode return n SIGHASH_ALL = 1 SIGHASH_NONE = 2 SIGHASH_SINGLE = 3 SIGHASH_FORKID = 0x40 SIGHASH_ANYONECANPAY = 0x80 def FindAndDelete(script, sig): """Consensus critical, see FindAndDelete() in Satoshi codebase""" r = b'' last_sop_idx = sop_idx = 0 skip = True for (opcode, data, sop_idx) in script.raw_iter(): if not skip: r += script[last_sop_idx:sop_idx] last_sop_idx = sop_idx if script[sop_idx:sop_idx + len(sig)] == sig: skip = True else: skip = False if not skip: r += script[last_sop_idx:] return CScript(r) def SignatureHash(script, txTo, inIdx, hashtype): """Consensus-correct SignatureHash Returns (hash, err) to precisely match the consensus-critical behavior of the SIGHASH_SINGLE bug. 
def SignatureHash(script, txTo, inIdx, hashtype): """Consensus-correct SignatureHash Returns (hash, err) to precisely match the consensus-critical behavior of the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity) """ HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' if inIdx >= len(txTo.vin): - return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin))) + return (HASH_ONE, "inIdx {} out of range ({})".format(inIdx, len(txTo.vin))) txtmp = CTransaction(txTo) for txin in txtmp.vin: txin.scriptSig = b'' txtmp.vin[inIdx].scriptSig = FindAndDelete( script, CScript([OP_CODESEPARATOR])) if (hashtype & 0x1f) == SIGHASH_NONE: txtmp.vout = [] for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 elif (hashtype & 0x1f) == SIGHASH_SINGLE: outIdx = inIdx if outIdx >= len(txtmp.vout): - return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout))) + return (HASH_ONE, "outIdx {} out of range ({})".format(outIdx, len(txtmp.vout))) tmp = txtmp.vout[outIdx] txtmp.vout = [] for i in range(outIdx): txtmp.vout.append(CTxOut(-1)) txtmp.vout.append(tmp) for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 if hashtype & SIGHASH_ANYONECANPAY: tmp = txtmp.vin[inIdx] txtmp.vin = [] txtmp.vin.append(tmp) s = txtmp.serialize() s += struct.pack(b"<I", hashtype) hash = hash256(s) return (hash, None) diff --git a/test/functional/test_framework/socks5.py b/test/functional/test_framework/socks5.py --- a/test/functional/test_framework/socks5.py +++ b/test/functional/test_framework/socks5.py #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Dummy Socks5 server for testing.""" import socket import threading import queue import logging logger = logging.getLogger("TestFramework.socks5") # Protocol constants class Command: CONNECT = 0x01 class AddressType: IPV4 = 0x01 DOMAINNAME = 0x03 IPV6 = 0x04 def recvall(s, n): """Receive n bytes exactly""" rv = bytearray() while n > 0: d = s.recv(n) if not d: raise IOError('Unexpected end of stream') rv.extend(d) n -= len(d) return rv # Implementation classes class Socks5Configuration(): """Proxy configuration.""" def __init__(self): self.addr = None # Bind address (must be set) self.af = socket.AF_INET # Bind address family self.unauth = False # Support unauthenticated self.auth = False # Support authentication class Socks5Command(): """Information about an incoming socks5 command.""" def __init__(self, cmd, atyp, addr, port, username, password): self.cmd = cmd # Command (one of Command.*) self.atyp = atyp # Address type (one of AddressType.*) self.addr = addr # Address self.port = port # Port to connect to self.username = username self.password = password def __repr__(self): - return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password) + return 'Socks5Command({},{},{},{},{},{})'.format( + self.cmd, self.atyp, self.addr, self.port, self.username, self.password) class Socks5Connection(): def __init__(self, serv, conn, peer): self.serv = serv self.conn = conn self.peer = peer def handle(self): """Handle socks5 request according to RFC 1928.""" try: # Verify socks version ver = recvall(self.conn, 1)[0] if ver != 0x05: - raise IOError('Invalid socks version %i' % ver) + raise IOError('Invalid socks version {}'.format(ver)) # Choose authentication method nmethods = recvall(self.conn, 1)[0] methods = bytearray(recvall(self.conn, nmethods)) method = None if 0x02 in methods and self.serv.conf.auth: method = 0x02 # username/password elif 0x00 in methods and self.serv.conf.unauth: method = 0x00 # unauthenticated if method is None: raise IOError('No supported authentication method was offered') # Send response self.conn.sendall(bytearray([0x05, method])) # Read authentication (optional) username = None password = None if method == 0x02: ver = recvall(self.conn, 1)[0] if ver != 0x01: - raise IOError('Invalid auth packet version %i' % ver) + raise IOError('Invalid auth packet version {}'.format(ver)) ulen = recvall(self.conn, 1)[0] username = str(recvall(self.conn, ulen)) plen = recvall(self.conn, 1)[0] password = str(recvall(self.conn, plen)) # Send authentication response self.conn.sendall(bytearray([0x01, 0x00])) # Read connect request ver, cmd, _, atyp =
recvall(self.conn, 4) if ver != 0x05: raise IOError( - 'Invalid socks version %i in connect request' % ver) + 'Invalid socks version {} in connect request'.format(ver)) if cmd != Command.CONNECT: - raise IOError('Unhandled command %i in connect request' % cmd) + raise IOError( + 'Unhandled command {} in connect request'.format(cmd)) if atyp == AddressType.IPV4: addr = recvall(self.conn, 4) elif atyp == AddressType.DOMAINNAME: n = recvall(self.conn, 1)[0] addr = recvall(self.conn, n) elif atyp == AddressType.IPV6: addr = recvall(self.conn, 16) else: - raise IOError('Unknown address type %i' % atyp) + raise IOError('Unknown address type {}'.format(atyp)) port_hi, port_lo = recvall(self.conn, 2) port = (port_hi << 8) | port_lo # Send dummy response self.conn.sendall( bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) cmdin = Socks5Command(cmd, atyp, addr, port, username, password) self.serv.queue.put(cmdin) - logger.info('Proxy: %s', cmdin) + logger.info('Proxy: {}'.format(cmdin)) # Fall through to disconnect except Exception as e: logger.exception("socks5 request handling failed.") self.serv.queue.put(e) finally: self.conn.close() class Socks5Server(): def __init__(self, conf): self.conf = conf self.s = socket.socket(conf.af) self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.s.bind(conf.addr) self.s.listen(5) self.running = False self.thread = None self.queue = queue.Queue() # report connections and exceptions to client def run(self): while self.running: (sockconn, peer) = self.s.accept() if self.running: conn = Socks5Connection(self, sockconn, peer) thread = threading.Thread(None, conn.handle) thread.daemon = True thread.start() def start(self): assert(not self.running) self.running = True self.thread = threading.Thread(None, self.run) self.thread.daemon = True self.thread.start() def stop(self): self.running = False # connect to self to end run loop s = socket.socket(self.conf.af) s.connect(self.conf.addr) s.close() self.thread.join() diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 08343c1b3..81e0eccb7 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -1,518 +1,519 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Base class for RPC testing.""" from collections import deque from enum import Enum import logging import argparse import os import pdb import shutil import sys import tempfile import time import traceback from .authproxy import JSONRPCException from . import coverage from .test_node import TestNode from .util import ( MAX_NODES, PortSeed, assert_equal, check_json_precision, connect_nodes_bi, disconnect_nodes, initialize_datadir, log_filename, p2p_port, rpc_port, set_node_times, sync_blocks, sync_mempools, ) class TestStatus(Enum): PASSED = 1 FAILED = 2 SKIPPED = 3 TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 class BitcoinTestFramework(): """Base class for a bitcoin test script. Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods. Individual tests can also override the following methods to customize the test setup: - add_options() - setup_chain() - setup_network() - setup_nodes() The __init__() and main() methods should not be overridden. 
This class also contains various public and private helper methods.""" def __init__(self): """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method""" self.setup_clean_chain = False self.nodes = [] self.mocktime = 0 self.supports_cli = False def main(self): """Main function. This should not be overridden by the subclass test scripts.""" parser = argparse.ArgumentParser(usage="%(prog)s [options]") parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop bitcoinds after the test execution") parser.add_argument("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"), help="Source directory containing bitcoind/bitcoin-cli (default: %(default)s)") parser.add_argument("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), help="Directory for caching pregenerated datadirs") parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs") parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int, help="The seed to use for assigning port numbers (default: current process id)") parser.add_argument("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") parser.add_argument("--configfile", dest="configfile", help="Location of the test framework config file") parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", help="Attach a python debugger if test fails") parser.add_argument("--usecli", dest="usecli", default=False, action="store_true", help="use bitcoin-cli instead of RPC for all commands") self.add_options(parser) self.options = parser.parse_args() self.set_test_params() assert hasattr( self, "num_nodes"), "Test must set self.num_nodes in set_test_params()" PortSeed.n = self.options.port_seed os.environ['PATH'] = self.options.srcdir + ":" + \ self.options.srcdir + "/qt:" + os.environ['PATH'] check_json_precision() self.options.cachedir = os.path.abspath(self.options.cachedir) # Set up temp directory and start logging if self.options.tmpdir: self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix="test") self._start_logging() success = TestStatus.FAILED try: if self.options.usecli and not self.supports_cli: raise SkipTest( "--usecli specified but test does not support using CLI") self.setup_chain() self.setup_network() self.run_test() success = TestStatus.PASSED except JSONRPCException as e: self.log.exception("JSONRPC error") except SkipTest as e: - self.log.warning("Test Skipped: %s" % e.message) + self.log.warning("Test Skipped: {}".format(e.message)) success = TestStatus.SKIPPED except AssertionError as e: 
self.log.exception("Assertion failed") except KeyError as e: self.log.exception("Key error") except Exception as e: self.log.exception("Unexpected exception caught during testing") except KeyboardInterrupt as e: self.log.warning("Exiting after keyboard interrupt") if success == TestStatus.FAILED and self.options.pdbonfailure: print("Testcase failed. Attaching python debugger. Enter ? for help") pdb.set_trace() if not self.options.noshutdown: self.log.info("Stopping nodes") if self.nodes: self.stop_nodes() else: self.log.info( "Note: bitcoinds were not stopped and may still be running") if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED: self.log.info("Cleaning up") shutil.rmtree(self.options.tmpdir) else: - self.log.warning("Not cleaning up dir %s" % self.options.tmpdir) + self.log.warning( + "Not cleaning up dir {}".format(self.options.tmpdir)) if os.getenv("PYTHON_DEBUG", ""): # Dump the end of the debug logs, to aid in debugging rare # travis failures. import glob filenames = [self.options.tmpdir + "/test_framework.log"] filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log") MAX_LINES_TO_PRINT = 1000 for fn in filenames: try: with open(fn, 'r') as f: print("From", fn, ":") print("".join(deque(f, MAX_LINES_TO_PRINT))) except OSError: - print("Opening file %s failed." % fn) + print("Opening file {} failed.".format(fn)) traceback.print_exc() if success == TestStatus.PASSED: self.log.info("Tests successful") sys.exit(TEST_EXIT_PASSED) elif success == TestStatus.SKIPPED: self.log.info("Test skipped") sys.exit(TEST_EXIT_SKIPPED) else: self.log.error( - "Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir) + "Test failed. Test logging available at {}/test_framework.log".format(self.options.tmpdir)) logging.shutdown() sys.exit(TEST_EXIT_FAILED) # Methods to override in subclass test scripts. def set_test_params(self): """Tests must this method to change default values for number of nodes, topology, etc""" raise NotImplementedError def add_options(self, parser): """Override this method to add command-line options to the test""" pass def setup_chain(self): """Override this method to customize blockchain setup""" self.log.info("Initializing test directory " + self.options.tmpdir) if self.setup_clean_chain: self._initialize_chain_clean() else: self._initialize_chain() def setup_network(self): """Override this method to customize test network topology""" self.setup_nodes() # Connect the nodes as a "chain". This allows us # to split the network between nodes 1 and 2 to get # two halves that can work on competing chains. for i in range(self.num_nodes - 1): connect_nodes_bi(self.nodes[i], self.nodes[i + 1]) self.sync_all() def setup_nodes(self): """Override this method to customize test node setup""" extra_args = None if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes() def run_test(self): """Tests must override this method to define test logic""" raise NotImplementedError # Public helper methods. These can be accessed by the subclass test scripts. 
# Public helper methods. These can be accessed by the subclass test scripts. def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None): """Instantiate TestNode objects""" if extra_args is None: extra_args = [[]] * num_nodes if binary is None: binary = [None] * num_nodes assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, rpc_port=rpc_port(i), p2p_port=p2p_port(i), timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli)) def start_node(self, i, *args, **kwargs): """Start a bitcoind""" node = self.nodes[i] node.start(*args, **kwargs) node.wait_for_rpc_connection() if self.options.coveragedir is not None: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) def start_nodes(self, extra_args=None, *args, **kwargs): """Start multiple bitcoinds""" if extra_args is None: extra_args = [None] * self.num_nodes assert_equal(len(extra_args), self.num_nodes) try: for i, node in enumerate(self.nodes): node.start(extra_args[i], *args, **kwargs) for node in self.nodes: node.wait_for_rpc_connection() except: # If one node failed to start, stop the others self.stop_nodes() raise if self.options.coveragedir is not None: for node in self.nodes: coverage.write_all_rpc_commands( self.options.coveragedir, node.rpc) def stop_node(self, i): """Stop a bitcoind test node""" self.nodes[i].stop_node() self.nodes[i].wait_until_stopped() def stop_nodes(self): """Stop multiple bitcoind test nodes""" for node in self.nodes: # Issue RPC to stop nodes node.stop_node() for node in self.nodes: # Wait for nodes to stop node.wait_until_stopped() def restart_node(self, i, extra_args=None): """Stop and start a test node""" self.stop_node(i) self.start_node(i, extra_args) def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs): with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr: try: self.start_node( i, extra_args, stderr=log_stderr, *args, **kwargs) self.stop_node(i) except Exception as e: assert 'bitcoind exited' in str(e) # node must have shut down self.nodes[i].running = False self.nodes[i].process = None if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8') if expected_msg not in stderr: raise AssertionError( "Expected error \"" + expected_msg + "\" not found in:\n" + stderr) else: if expected_msg is None: assert_msg = "bitcoind should have exited with an error" else: assert_msg = "bitcoind should have exited with expected error " + expected_msg raise AssertionError(assert_msg) def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) def split_network(self): """ Split the network of four nodes into nodes 0/1 and 2/3. """ disconnect_nodes(self.nodes[1], self.nodes[2]) disconnect_nodes(self.nodes[2], self.nodes[1]) self.sync_all([self.nodes[:2], self.nodes[2:]]) def join_network(self): """ Join the (previously split) network halves together. """ connect_nodes_bi(self.nodes[1], self.nodes[2]) self.sync_all() def sync_all(self, node_groups=None): if not node_groups: node_groups = [self.nodes] for group in node_groups: sync_blocks(group) sync_mempools(group) def enable_mocktime(self): """Enable mocktime for the script. mocktime may be needed for scripts that use the cached version of the blockchain. If the cached version of the blockchain is used without mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous versions of the cache, this helper function sets mocktime to Jan 1, 2014 + (201 * 10 * 60)""" self.mocktime = 1388534400 + (201 * 10 * 60) def disable_mocktime(self): self.mocktime = 0 # Private helper methods. These should not be accessed by the subclass test scripts. def _start_logging(self): # Add logger and logging handlers self.log = logging.getLogger('TestFramework') self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log') fh.setLevel(logging.DEBUG) # Create console handler to log messages to stderr. By default this # logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # User can provide log level as a number or string (eg DEBUG). loglevel # was caught as a string, so try to convert it to an int ll = int(self.options.loglevel) if self.options.loglevel.isdigit( ) else self.options.loglevel.upper() ch.setLevel(ll) # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) formatter = logging.Formatter( fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S') formatter.converter = time.gmtime fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger self.log.addHandler(fh) self.log.addHandler(ch) if self.options.trace_rpc: rpc_logger = logging.getLogger("BitcoinRPC") rpc_logger.setLevel(logging.DEBUG) rpc_handler = logging.StreamHandler(sys.stdout) rpc_handler.setLevel(logging.DEBUG) rpc_logger.addHandler(rpc_handler) def _initialize_chain(self): """Initialize a pre-mined blockchain for use by the test. Create a cache of a 200-block-long chain (with wallet) for MAX_NODES. Afterward, create num_nodes copies from the cache.""" assert self.num_nodes <= MAX_NODES create_cache = False for i in range(MAX_NODES): if not os.path.isdir(os.path.join(self.options.cachedir, 'node' + str(i))): create_cache = True break if create_cache: self.log.debug("Creating data directories from cached datadir") # find and delete old cache directories if any exist for i in range(MAX_NODES): if os.path.isdir(os.path.join(self.options.cachedir, "node" + str(i))): shutil.rmtree(os.path.join( self.options.cachedir, "node" + str(i))) # Create cache directories, run bitcoinds: for i in range(MAX_NODES): datadir = initialize_datadir(self.options.cachedir, i) self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], host=None, rpc_port=rpc_port(i), p2p_port=p2p_port(i), timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None)) self.nodes[i].clear_default_args() self.nodes[i].extend_default_args([ "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]) if i > 0: self.nodes[i].extend_default_args( ["-connect=127.0.0.1:" + str(p2p_port(0))]) self.start_node(i) # Wait for RPC connections to be ready for node in self.nodes: node.wait_for_rpc_connection() # Create a 200-block-long chain; each of the first 4 nodes # gets 25 mature blocks and 25 immature. # Note: To preserve compatibility with older versions of # initialize_chain, only 4 nodes will generate coins.
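# Worked arithmetic for the mocktime schedule below (derived from
# enable_mocktime() above): self.mocktime = 1388534400 + 201 * 10 * 60, where
# 1388534400 is Jan 1, 2014 00:00:00 UTC, so block_time starts exactly
# 201 * 10 * 60 seconds (2010 minutes) before mocktime, i.e. at Jan 1, 2014.
# The loops below mine 2 rounds * 4 peers * 25 blocks = 200 blocks, advancing
# block_time by 600 seconds per block.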
# # blocks are created with timestamps 10 minutes apart # starting from 2010 minutes in the past self.enable_mocktime() block_time = self.mocktime - (201 * 10 * 60) for i in range(2): for peer in range(4): for j in range(25): set_node_times(self.nodes, block_time) self.nodes[peer].generate(1) block_time += 10 * 60 # Must sync before next peer starts generating blocks sync_blocks(self.nodes) # Shut them down, and clean up cache directories: self.stop_nodes() self.nodes = [] self.disable_mocktime() for i in range(MAX_NODES): os.remove(log_filename(self.options.cachedir, i, "debug.log")) os.remove(log_filename( self.options.cachedir, i, "wallets/db.log")) os.remove(log_filename(self.options.cachedir, i, "peers.dat")) for i in range(self.num_nodes): from_dir = os.path.join(self.options.cachedir, "node" + str(i)) to_dir = os.path.join(self.options.tmpdir, "node" + str(i)) shutil.copytree(from_dir, to_dir) # Overwrite port/rpcport in bitcoin.conf initialize_datadir(self.options.tmpdir, i) def _initialize_chain_clean(self): """Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization.""" for i in range(self.num_nodes): initialize_datadir(self.options.tmpdir, i) class ComparisonTestFramework(BitcoinTestFramework): """Test framework for doing p2p comparison testing Sets up some bitcoind binaries: - 1 binary: test binary - 2 binaries: 1 test binary, 1 ref binary - n>2 binaries: 1 test binary, n-1 ref binaries""" def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def add_options(self, parser): parser.add_argument("--testbinary", dest="testbinary", default=os.getenv("BITCOIND", "bitcoind"), help="bitcoind binary to test") parser.add_argument("--refbinary", dest="refbinary", default=os.getenv("BITCOIND", "bitcoind"), help="bitcoind binary to use for reference nodes (if any)") def setup_network(self): extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args, binary=[self.options.testbinary] + [self.options.refbinary] * (self.num_nodes - 1)) self.start_nodes() class SkipTest(Exception): """This exception is raised to skip a test""" def __init__(self, message): self.message = message diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index d2b7f2432..4e86ac1b6 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -1,325 +1,325 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" import decimal import errno import http.client import json import logging import os import re import subprocess import time from .authproxy import JSONRPCException from .mininode import COIN, FromHex, CTransaction from .util import ( assert_equal, get_rpc_proxy, rpc_url, wait_until, p2p_port, ) # For Python 3.4 compatibility JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) BITCOIND_PROC_WAIT_TIMEOUT = 60 class TestNode(): """A class for representing a bitcoind node under test. 
This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, dirname, extra_args, host, rpc_port, p2p_port, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False): self.index = i self.datadir = os.path.join(dirname, "node" + str(i)) self.host = host self.rpc_port = rpc_port self.p2p_port = p2p_port self.name = "testnode-{}".format(i) if timewait: self.rpc_timeout = timewait else: # Wait for up to 60 seconds for the RPC server to respond self.rpc_timeout = 60 if binary is None: self.binary = os.getenv("BITCOIND", "bitcoind") else: self.binary = binary self.stderr = stderr self.coverage_dir = coverage_dir # Most callers will just need to add extra args to the default list # below. # For those callers that need more flexibility, they can access the # default args using the provided facilities self.extra_args = extra_args self.default_args = ["-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=" + self.name] self.cli = TestNodeCLI( os.getenv("BITCOINCLI", "bitcoin-cli"), self.datadir) self.use_cli = use_cli self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.relay_fee_cache = None - self.log = logging.getLogger('TestFramework.node%d' % i) + self.log = logging.getLogger('TestFramework.node{}'.format(i)) self.p2ps = [] def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: return getattr(self.cli, name) else: assert self.rpc is not None, "Error: RPC not initialized" assert self.rpc_connected, "Error: No RPC connection" return getattr(self.rpc, name) def clear_default_args(self): self.default_args.clear() def extend_default_args(self, args): self.default_args.extend(args) def remove_default_args(self, args): for rm_arg in args: # Remove all occurrences of rm_arg in self.default_args: # - if the arg is a flag (-flag), then the names must match # - if the arg is a value (-key=value) then the name must start # with "-key=" (the '=' char is to avoid removing the "-key_suffix" # arg when "-key" is the argument to remove). self.default_args = [def_arg for def_arg in self.default_args if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')] def start(self, extra_args=None, stderr=None, *args, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args if stderr is None: stderr = self.stderr self.process = subprocess.Popen( [self.binary] + self.default_args + extra_args, stderr=stderr, *args, **kwargs) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up")
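# Illustrative (hypothetical) use of the default-arg facilities above from a
# test, e.g. to override the default -keypool setting before starting a node:
#
#     node.remove_default_args(["-keypool"])   # also drops any "-keypool=..." form
#     node.extend_default_args(["-keypool=100"])
#     node.start()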
def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoind process. Raises an AssertionError if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): assert self.process.poll( - ) is None, "bitcoind exited with status %i during initialization" % self.process.returncode + ) is None, "bitcoind exited with status {} during initialization".format(self.process.returncode) try: self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.host, self.rpc_port), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir) self.rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up self.rpc_connected = True self.url = self.rpc.url self.log.debug("RPC successfully started") return except IOError as e: if e.errno != errno.ECONNREFUSED: # Port not yet open? raise # unknown IO error except JSONRPCException as e: # Initialization phase if e.error['code'] != -28: # RPC in warmup? raise # unknown JSON RPC exception except ValueError as e: # cookie file not found and no rpcuser or rpcpassword. bitcoind is still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) raise AssertionError("Unable to connect to bitcoind") def get_wallet_rpc(self, wallet_name): if self.use_cli: return self.cli("-rpcwallet={}".format(wallet_name)) else: assert self.rpc_connected assert self.rpc wallet_path = "wallet/{}".format(wallet_name) return self.rpc / wallet_path def stop_node(self): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop() except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") del self.p2ps[:] def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert_equal(return_code, 0) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until(self.is_node_stopped, timeout=timeout) def node_encrypt_wallet(self, passphrase): """Encrypts the wallet. This causes bitcoind to shut down, so this method takes care of cleaning up resources.""" self.encryptwallet(passphrase) self.wait_until_stopped() def relay_fee(self, cached=True): if not self.relay_fee_cache or not cached: self.relay_fee_cache = self.getnetworkinfo()["relayfee"] return self.relay_fee_cache def calculate_fee(self, tx): # Relay fee is in satoshis per KB. Thus the 1000, and the COIN added # to get back to an amount of satoshis. billable_size_estimate = tx.billable_size() # Add some padding for signatures # NOTE: Fees must be calculated before signatures are added, # so they will never be included in the billable_size above. billable_size_estimate += len(tx.vin) * 81 return int(self.relay_fee() / 1000 * billable_size_estimate * COIN) def calculate_fee_from_txid(self, txid): ctx = FromHex(CTransaction(), self.getrawtransaction(txid)) return self.calculate_fee(ctx) def add_p2p_connection(self, p2p_conn, *args, **kwargs): """Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' p2p_conn.peer_connect(*args, **kwargs) self.p2ps.append(p2p_conn) return p2p_conn @property def p2p(self): """Return the first p2p connection Convenience property - most tests only use a single p2p connection to each node, so this saves having to write node.p2ps[0] many times.""" assert self.p2ps, "No p2p connection" return self.p2ps[0] def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) class TestNodeCLI(): """Interface to bitcoin-cli for an individual node""" def __init__(self, binary, datadir): self.args = [] self.binary = binary self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.bitcoincli') def __call__(self, *args, input=None): # TestNodeCLI is callable with bitcoin-cli command-line args cli = TestNodeCLI(self.binary, self.datadir) cli.args = [str(arg) for arg in args] cli.input = input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for request in requests: try: results.append(dict(result=request())) except JSONRPCException as e: results.append(dict(error=e)) return results def send_cli(self, command, *args, **kwargs): """Run bitcoin-cli command. Deserializes returned string as python object.""" pos_args = [str(arg) for arg in args] named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()] assert not ( pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.args if named_args: p_args += ["-named"] p_args += [command] + pos_args + named_args self.log.debug("Running bitcoin-cli command: {}".format(command)) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match( r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError( returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except JSONDecodeError: return cli_stdout.rstrip("\n") diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 64d909236..0f8e50ddc 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -1,634 +1,636 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Helpful routines for regression testing.""" from base64 import b64encode from binascii import hexlify, unhexlify from decimal import Decimal, ROUND_DOWN import hashlib import json import logging import os import random import re from subprocess import CalledProcessError import time from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException logger = logging.getLogger("TestFramework.utils") # Assert functions ################## def assert_fee_amount(fee, tx_size, fee_per_kB, wiggleroom=2): """ Assert the fee was in range wiggleroom defines an amount that the test expects the wallet to be off by when estimating fees. This can be due to the dummy signature that is added during fee calculation, or due to the wallet funding transactions using the ceiling of the calculated fee. """ target_fee = tx_size * fee_per_kB / 1000 if fee < (tx_size - wiggleroom) * fee_per_kB / 1000: raise AssertionError( - "Fee of %s BCH too low! (Should be %s BCH)" % (str(fee), str(target_fee))) + "Fee of {} BCH too low! (Should be {} BCH)".format(str(fee), str(target_fee))) if fee > (tx_size + wiggleroom) * fee_per_kB / 1000: raise AssertionError( - "Fee of %s BCH too high! (Should be %s BCH)" % (str(fee), str(target_fee))) + "Fee of {} BCH too high! (Should be {} BCH)".format(str(fee), str(target_fee))) def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args): - raise AssertionError("not(%s)" % " == ".join(str(arg) - for arg in (thing1, thing2) + args)) + raise AssertionError("not({})".format(" == ".join(str(arg) + for arg in (thing1, thing2) + args))) def assert_greater_than(thing1, thing2): if thing1 <= thing2: - raise AssertionError("%s <= %s" % (str(thing1), str(thing2))) + raise AssertionError("{} <= {}".format(str(thing1), str(thing2))) def assert_greater_than_or_equal(thing1, thing2): if thing1 < thing2: - raise AssertionError("%s < %s" % (str(thing1), str(thing2))) + raise AssertionError("{} < {}".format(str(thing1), str(thing2))) def assert_raises(exc, fun, *args, **kwds): assert_raises_message(exc, None, fun, *args, **kwds) def assert_raises_message(exc, message, fun, *args, **kwds): try: fun(*args, **kwds) except JSONRPCException: raise AssertionError( "Use assert_raises_rpc_error() to test RPC failures") except exc as e: if message is not None and message not in e.error['message']: raise AssertionError( "Expected substring not found:" + e.error['message']) except Exception as e: raise AssertionError( "Unexpected exception raised: " + type(e).__name__) else: raise AssertionError("No exception raised") def assert_raises_process_error(returncode, output, fun, *args, **kwds): """Execute a process and asserts the process return code and output. Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError and verifies that the return code and output are as expected. Throws AssertionError if no CalledProcessError was raised or if the return code and output are not as expected. Args: returncode (int): the process return code. output (string): [a substring of] the process output. fun (function): the function to call. This should execute a process. args*: positional arguments for the function. kwds**: named arguments for the function. 
""" try: fun(*args, **kwds) except CalledProcessError as e: if returncode != e.returncode: - raise AssertionError("Unexpected returncode %i" % e.returncode) + raise AssertionError( + "Unexpected returncode {}".format(e.returncode)) if output not in e.output: raise AssertionError("Expected substring not found:" + e.output) else: raise AssertionError("No exception raised") def assert_raises_rpc_error(code, message, fun, *args, **kwds): """Run an RPC and verify that a specific JSONRPC exception code and message is raised. Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException and verifies that the error code and message are as expected. Throws AssertionError if no JSONRPCException was raised or if the error code/message are not as expected. Args: code (int), optional: the error code returned by the RPC call (defined in src/rpc/protocol.h). Set to None if checking the error code is not required. message (string), optional: [a substring of] the error string returned by the RPC call. Set to None if checking the error string is not required. fun (function): the function to call. This should be the name of an RPC. args*: positional arguments for the function. kwds**: named arguments for the function. """ assert try_rpc(code, message, fun, *args, **kwds), "No exception raised" def try_rpc(code, message, fun, *args, **kwds): """Tries to run an rpc command. Test against error code and message if the rpc fails. Returns whether a JSONRPCException was raised.""" try: fun(*args, **kwds) except JSONRPCException as e: # JSONRPCException was thrown as expected. Check the code and message values are correct. if (code is not None) and (code != e.error["code"]): raise AssertionError( - "Unexpected JSONRPC error code %i" % e.error["code"]) + "Unexpected JSONRPC error code {}".format(e.error["code"])) if (message is not None) and (message not in e.error['message']): raise AssertionError( "Expected substring not found:" + e.error['message']) return True except Exception as e: raise AssertionError( "Unexpected exception raised: " + type(e).__name__) else: return False def assert_is_hex_string(string): try: int(string, 16) except Exception as e: raise AssertionError( - "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e)) + "Couldn't interpret {!r} as hexadecimal; raised: {}".format(string, e)) def assert_is_hash_string(string, length=64): if not isinstance(string, str): - raise AssertionError("Expected a string, got type %r" % type(string)) + raise AssertionError( + "Expected a string, got type {!r}".format(type(string))) elif length and len(string) != length: raise AssertionError( - "String of length %d expected; got %d" % (length, len(string))) + "String of length {} expected; got {}".format(length, len(string))) elif not re.match('[abcdef0-9]+$', string): raise AssertionError( - "String %r contains invalid characters for a hash." % string) + "String {!r} contains invalid characters for a hash.".format(string)) def assert_array_result(object_array, to_match, expected, should_not_find=False): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. 
If the should_not_find flag is true, to_match should not be found in object_array """ if should_not_find: assert_equal(expected, {}) num_matched = 0 for item in object_array: all_match = True for key, value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue elif should_not_find: num_matched = num_matched + 1 for key, value in expected.items(): if item[key] != value: - raise AssertionError("%s : expected %s=%s" % - (str(item), str(key), str(value))) + raise AssertionError("{} : expected {}={}".format( + str(item), str(key), str(value))) num_matched = num_matched + 1 if num_matched == 0 and not should_not_find: - raise AssertionError("No objects matched %s" % (str(to_match))) + raise AssertionError("No objects matched {}".format(str(to_match))) if num_matched > 0 and should_not_find: - raise AssertionError("Objects were found %s" % (str(to_match))) + raise AssertionError("Objects were found {}".format(str(to_match))) # Utility functions ################### def check_json_precision(): """Make sure json library being used does not lose precision converting BCH values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def count_bytes(hex_string): return len(bytearray.fromhex(hex_string)) def bytes_to_hex_str(byte_str): return hexlify(byte_str).decode('ascii') def hash256(byte_str): sha256 = hashlib.sha256() sha256.update(byte_str) sha256d = hashlib.sha256() sha256d.update(sha256.digest()) return sha256d.digest()[::-1] def hex_str_to_bytes(hex_str): return unhexlify(hex_str.encode('ascii')) def str_to_b64str(string): return b64encode(string.encode('utf-8')).decode('ascii') def satoshi_round(amount): return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None): if attempts == float('inf') and timeout == float('inf'): timeout = 60 attempt = 0 timeout += time.time() while attempt < attempts and time.time() < timeout: if lock: with lock: if predicate(): return else: if predicate(): return attempt += 1 time.sleep(0.05) # Print the cause of the timeout assert_greater_than(attempts, attempt) assert_greater_than(timeout, time.time()) raise RuntimeError('Unreachable') # RPC/P2P connection constants and functions ############################################ # The maximum number of nodes a single test can spawn MAX_NODES = 8 # Don't assign rpc or p2p ports lower than this PORT_MIN = 11000 # The number of ports to "reserve" for p2p and rpc, each PORT_RANGE = 5000 class PortSeed: # Must be initialized with a unique integer for each process n = None def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None): """ Args: url (str): URL of the RPC server to call node_number (int): the node number (or id) that this calls to Kwargs: timeout (int): HTTP timeout in seconds Returns: AuthServiceProxy. convenience object for making RPC calls. 
""" proxy_kwargs = {} if timeout is not None: proxy_kwargs['timeout'] = timeout proxy = AuthServiceProxy(url, **proxy_kwargs) proxy.url = url # store URL on proxy for info coverage_logfile = coverage.get_filename( coveragedir, node_number) if coveragedir else None return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) def p2p_port(n): assert(n <= MAX_NODES) return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_port(n): return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_url(datadir, host, port): rpc_u, rpc_p = get_auth_cookie(datadir) if host == None: host = '127.0.0.1' - return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port)) + return "http://{}:{}@{}:{}".format(rpc_u, rpc_p, host, int(port)) # Node functions ################ def initialize_datadir(dirname, n): datadir = os.path.join(dirname, "node" + str(n)) if not os.path.isdir(datadir): os.makedirs(datadir) with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f: f.write("regtest=1\n") f.write("[regtest]\n") f.write("port=" + str(p2p_port(n)) + "\n") f.write("rpcport=" + str(rpc_port(n)) + "\n") f.write("listenonion=0\n") f.write("usecashaddr=1\n") return datadir def get_datadir_path(dirname, n): return os.path.join(dirname, "node" + str(n)) def get_auth_cookie(datadir): user = None password = None if os.path.isfile(os.path.join(datadir, "bitcoin.conf")): with open(os.path.join(datadir, "bitcoin.conf"), 'r', encoding='utf8') as f: for line in f: if line.startswith("rpcuser="): assert user is None # Ensure that there is only one rpcuser line user = line.split("=")[1].strip("\n") if line.startswith("rpcpassword="): assert password is None # Ensure that there is only one rpcpassword line password = line.split("=")[1].strip("\n") if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")): with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f: userpass = f.read() split_userpass = userpass.split(':') user = split_userpass[0] password = split_userpass[1] if user is None or password is None: raise ValueError("No RPC credentials") return user, password def log_filename(dirname, n_node, logname): return os.path.join(dirname, "node" + str(n_node), "regtest", logname) def get_bip9_status(node, key): info = node.getblockchaininfo() return info['bip9_softforks'][key] def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) def disconnect_nodes(from_node, to_node): for peer_id in [peer['id'] for peer in from_node.getpeerinfo() if to_node.name in peer['subver']]: from_node.disconnectnode(nodeid=peer_id) for _ in range(50): if [peer['id'] for peer in from_node.getpeerinfo() if to_node.name in peer['subver']] == []: break time.sleep(0.1) else: raise AssertionError("timed out waiting for disconnect") def connect_nodes(from_node, to_node): host = to_node.host if host == None: host = '127.0.0.1' ip_port = host + ':' + str(to_node.p2p_port) from_node.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying while any(peer['version'] == 0 for peer in from_node.getpeerinfo()): time.sleep(0.1) def connect_nodes_bi(a, b): connect_nodes(a, b) connect_nodes(b, a) def sync_blocks(rpc_connections, *, wait=1, timeout=60): """ Wait until everybody has the same tip. sync_blocks needs to be called with an rpc_connections set that has least one node already synced to the latest, stable tip, otherwise there's a chance it might return before all nodes are stably synced. 
""" # Use getblockcount() instead of waitforblockheight() to determine the # initial max height because the two RPCs look at different internal global # variables (chainActive vs latestBlock) and the former gets updated # earlier. maxheight = max(x.getblockcount() for x in rpc_connections) start_time = cur_time = time.time() while cur_time <= start_time + timeout: tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections] if all(t["height"] == maxheight for t in tips): if all(t["hash"] == tips[0]["hash"] for t in tips): return raise AssertionError("Block sync failed, mismatched block hashes:{}".format( "".join("\n {!r}".format(tip) for tip in tips))) cur_time = time.time() raise AssertionError("Block sync to height {} timed out:{}".format( maxheight, "".join("\n {!r}".format(tip) for tip in tips))) def sync_chain(rpc_connections, *, wait=1, timeout=60): """ Wait until everybody has the same best block """ while timeout > 0: best_hash = [x.getbestblockhash() for x in rpc_connections] if best_hash == [best_hash[0]] * len(best_hash): return time.sleep(wait) timeout -= wait raise AssertionError("Chain sync failed: Best block hashes don't match") def sync_mempools(rpc_connections, *, wait=1, timeout=60): """ Wait until everybody has the same transactions in their memory pools """ while timeout > 0: pool = set(rpc_connections[0].getrawmempool()) num_match = 1 for i in range(1, len(rpc_connections)): if set(rpc_connections[i].getrawmempool()) == pool: num_match = num_match + 1 if num_match == len(rpc_connections): return time.sleep(wait) timeout -= wait raise AssertionError("Mempool sync failed") # Transaction/Block functions ############################# def find_output(node, txid, amount): """ Return index to output of txid with value amount Raises exception if there is none. """ txdata = node.getrawtransaction(txid, 1) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i - raise RuntimeError("find_output txid %s : %s not found" % - (txid, str(amount))) + raise RuntimeError("find_output txid {} : {} not found".format( + txid, str(amount))) def gather_inputs(from_node, amount_needed, confirmations_required=1): """ Return a random set of unspent txouts that are enough to pay amount_needed """ assert(confirmations_required >= 0) utxo = from_node.listunspent(confirmations_required) random.shuffle(utxo) inputs = [] total_in = Decimal("0.00000000") while total_in < amount_needed and len(utxo) > 0: t = utxo.pop() total_in += t["amount"] inputs.append( {"txid": t["txid"], "vout": t["vout"], "address": t["address"]}) if total_in < amount_needed: - raise RuntimeError("Insufficient funds: need %d, have %d" % - (amount_needed, total_in)) + raise RuntimeError("Insufficient funds: need {}, have {}".format( + amount_needed, total_in)) return (total_in, inputs) def make_change(from_node, amount_in, amount_out, fee): """ Create change output(s), return them """ outputs = {} amount = amount_out + fee change = amount_in - amount if change > amount * 2: # Create an extra change output to break up big inputs change_address = from_node.getnewaddress() # Split change in two, being careful of rounding: outputs[change_address] = Decimal( change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) change = amount_in - amount - outputs[change_address] if change > 0: outputs[from_node.getnewaddress()] = change return outputs def send_zeropri_transaction(from_node, to_node, amount, fee): """ Create&broadcast a zero-priority transaction. 
Returns (txid, hex-encoded-txdata) Ensures transaction is zero-priority by first creating a send-to-self, then using its output """ # Create a send-to-self with confirmed inputs: self_address = from_node.getnewaddress() (total_in, inputs) = gather_inputs(from_node, amount + fee * 2) outputs = make_change(from_node, total_in, amount + fee, fee) outputs[self_address] = float(amount + fee) self_rawtx = from_node.createrawtransaction(inputs, outputs) self_signresult = from_node.signrawtransaction(self_rawtx) self_txid = from_node.sendrawtransaction(self_signresult["hex"], True) vout = find_output(from_node, self_txid, amount + fee) # Now immediately spend the output to create a 1-input, 1-output # zero-priority transaction: inputs = [{"txid": self_txid, "vout": vout}] outputs = {to_node.getnewaddress(): float(amount)} rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"]) def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random zero-priority transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment * random.randint(0, fee_variants) (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee) return (txid, txhex, fee) def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment * random.randint(0, fee_variants) (total_in, inputs) = gather_inputs(from_node, amount + fee) outputs = make_change(from_node, total_in, amount, fee) outputs[to_node.getnewaddress()] = float(amount) rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"], fee) # Create large OP_RETURN txouts that can be appended to a transaction # to make it large (helper for constructing large transactions). def gen_return_txouts(): # Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create # So we have big transactions (and therefore can't fit very many into each block) # create one script_pubkey script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes for i in range(512): script_pubkey = script_pubkey + "01" # concatenate 128 txouts of above script_pubkey which we'll insert before # the txout for change txouts = "81" for k in range(128): # add txout value txouts = txouts + "0000000000000000" # add length of script_pubkey txouts = txouts + "fd0402" # add script_pubkey txouts = txouts + script_pubkey return txouts def create_tx(node, coinbase, to_address, amount): inputs = [{"txid": coinbase, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx) assert_equal(signresult["complete"], True) return signresult["hex"] # Create a spend of each passed-in utxo, splicing in "txouts" to each raw # transaction to make it large. See gen_return_txouts() above. 
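# A note on the splice in create_lots_of_big_transactions() below (a sketch of
# the raw transaction layout, not a consensus spec): for the 1-input
# transactions built here, rawtx[0:92] covers the version, the input count and
# the single input, rawtx[92:94] is the one-byte output-count compactsize
# ('01') that gets replaced, and txouts starts with '81' (compactsize for 129)
# so the 128 OP_RETURN outputs plus the original change output keep the count
# consistent.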
def create_lots_of_big_transactions(node, txouts, utxos, num, fee): addr = node.getnewaddress() txids = [] for _ in range(num): t = utxos.pop() inputs = [{"txid": t["txid"], "vout": t["vout"]}] outputs = {} change = t['amount'] - fee outputs[addr] = satoshi_round(change) rawtx = node.createrawtransaction(inputs, outputs) newtx = rawtx[0:92] newtx = newtx + txouts newtx = newtx + rawtx[94:] signresult = node.signrawtransaction(newtx, None, None, "NONE|FORKID") txid = node.sendrawtransaction(signresult["hex"], True) txids.append(txid) return txids
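For orientation, a minimal sketch of how a test might drive the two helpers above to bloat a block (illustrative only, not part of the change; the node, its spendable UTXOs, and the fee value are assumed to come from the calling test):

    # Inside a BitcoinTestFramework.run_test(), with mature coins available:
    txouts = gen_return_txouts()   # hex blob: 128 large OP_RETURN outputs
    utxos = self.nodes[0].listunspent()
    fee = 100 * self.nodes[0].getnetworkinfo()["relayfee"]
    txids = create_lots_of_big_transactions(self.nodes[0], txouts, utxos, 10, fee)
    assert_equal(len(txids), 10)
    self.nodes[0].generate(1)      # mine the padded transactions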