diff --git a/test/functional/test_framework/address.py b/test/functional/test_framework/address.py index 247c640e8..814c55ee0 100644 --- a/test/functional/test_framework/address.py +++ b/test/functional/test_framework/address.py @@ -1,66 +1,66 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Encode and decode BASE58, P2PKH and P2SH addresses.""" -from .script import hash256, hash160, CScript +from .script import CScript, hash160, hash256 from .util import bytes_to_hex_str, hex_str_to_bytes chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' def byte_to_base58(b, version): result = '' str = bytes_to_hex_str(b) str = bytes_to_hex_str(chr(version).encode('latin-1')) + str checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str))) str += checksum[:8] value = int('0x' + str, 0) while value > 0: result = chars[value % 58] + result value //= 58 while (str[:2] == '00'): result = chars[0] + result str = str[2:] return result # TODO: def base58_decode def keyhash_to_p2pkh(hash, main=False): assert (len(hash) == 20) version = 0 if main else 111 return byte_to_base58(hash, version) def scripthash_to_p2sh(hash, main=False): assert (len(hash) == 20) version = 5 if main else 196 return byte_to_base58(hash, version) def key_to_p2pkh(key, main=False): key = check_key(key) return keyhash_to_p2pkh(hash160(key), main) def script_to_p2sh(script, main=False): script = check_script(script) return scripthash_to_p2sh(hash160(script), main) def check_key(key): if (type(key) is str): key = hex_str_to_bytes(key) # Assuming this is hex string if (type(key) is bytes and (len(key) == 33 or len(key) == 65)): return key assert(False) def check_script(script): if (type(script) is str): script = hex_str_to_bytes(script) # Assuming this is hex string if (type(script) is bytes or type(script) is CScript): return script assert(False) diff --git a/test/functional/test_framework/blockstore.py b/test/functional/test_framework/blockstore.py index 64e163c88..d145596d6 100644 --- a/test/functional/test_framework/blockstore.py +++ b/test/functional/test_framework/blockstore.py @@ -1,162 +1,170 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """BlockStore and TxStore helper classes.""" -from .mininode import * -from io import BytesIO import dbm.dumb as dbmd +from io import BytesIO +import logging + +from .messages import ( + CBlock, + CBlockHeader, + CBlockLocator, + msg_headers, + msg_generic, +) logger = logging.getLogger("TestFramework.blockstore") class BlockStore(): """BlockStore helper class. BlockStore keeps a map of blocks and implements helper functions for responding to getheaders and getdata, and for constructing a getheaders message. 
""" def __init__(self, datadir): self.blockDB = dbmd.open(datadir + "/blocks", 'c') self.currentBlock = 0 self.headers_map = dict() def close(self): self.blockDB.close() def erase(self, blockhash): del self.blockDB[repr(blockhash)] # lookup an entry and return the item as raw bytes def get(self, blockhash): value = None try: value = self.blockDB[repr(blockhash)] except KeyError: return None return value # lookup an entry and return it as a CBlock def get_block(self, blockhash): ret = None serialized_block = self.get(blockhash) if serialized_block is not None: f = BytesIO(serialized_block) ret = CBlock() ret.deserialize(f) ret.calc_sha256() return ret def get_header(self, blockhash): try: return self.headers_map[blockhash] except KeyError: return None # Note: this pulls full blocks out of the database just to retrieve # the headers -- perhaps we could keep a separate data structure # to avoid this overhead. def headers_for(self, locator, hash_stop, current_tip=None): if current_tip is None: current_tip = self.currentBlock current_block_header = self.get_header(current_tip) if current_block_header is None: return None response = msg_headers() headersList = [current_block_header] maxheaders = 2000 while (headersList[0].sha256 not in locator.vHave): prevBlockHash = headersList[0].hashPrevBlock prevBlockHeader = self.get_header(prevBlockHash) if prevBlockHeader is not None: headersList.insert(0, prevBlockHeader) else: break headersList = headersList[:maxheaders] # truncate if we have too many hashList = [x.sha256 for x in headersList] index = len(headersList) if (hash_stop in hashList): index = hashList.index(hash_stop) + 1 response.headers = headersList[:index] return response def add_block(self, block): block.calc_sha256() try: self.blockDB[repr(block.sha256)] = bytes(block.serialize()) except TypeError as e: logger.exception("Unexpected error") self.currentBlock = block.sha256 self.headers_map[block.sha256] = CBlockHeader(block) def add_header(self, header): self.headers_map[header.sha256] = header # lookup the hashes in "inv", and return p2p messages for delivering # blocks found. 
def get_blocks(self, inv): responses = [] for i in inv: if (i.type == 2): # MSG_BLOCK data = self.get(i.hash) if data is not None: # Use msg_generic to avoid re-serialization responses.append(msg_generic(b"block", data)) return responses def get_locator(self, current_tip=None): if current_tip is None: current_tip = self.currentBlock r = [] counter = 0 step = 1 lastBlock = self.get_block(current_tip) while lastBlock is not None: r.append(lastBlock.hashPrevBlock) for i in range(step): lastBlock = self.get_block(lastBlock.hashPrevBlock) if lastBlock is None: break counter += 1 if counter > 10: step *= 2 locator = CBlockLocator() locator.vHave = r return locator class TxStore(): def __init__(self, datadir): self.txDB = dbmd.open(datadir + "/transactions", 'c') def close(self): self.txDB.close() # lookup an entry and return the item as raw bytes def get(self, txhash): value = None try: value = self.txDB[repr(txhash)] except KeyError: return None return value def add_transaction(self, tx): tx.calc_sha256() try: self.txDB[repr(tx.sha256)] = bytes(tx.serialize()) except TypeError as e: logger.exception("Unexpected error") def get_transactions(self, inv): responses = [] for i in inv: if (i.type == 1): # MSG_TX tx = self.get(i.hash) if tx is not None: responses.append(msg_generic(b"tx", tx)) return responses diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index de30bb7b3..8a0bf4065 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -1,183 +1,201 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Utilities for manipulating blocks and transactions.""" -from .mininode import * -from .script import CScript, OP_TRUE, OP_CHECKSIG, OP_RETURN, OP_PUSHDATA2, OP_DUP, OP_HASH160, OP_EQUALVERIFY -from .mininode import CTransaction, CTxOut, CTxIn -from .util import satoshi_round +from .script import ( + CScript, + OP_CHECKSIG, + OP_DUP, + OP_EQUALVERIFY, + OP_HASH160, + OP_PUSHDATA2, + OP_RETURN, + OP_TRUE, +) +from .messages import ( + CBlock, + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + FromHex, + ToHex, + ser_string, +) from .txtools import pad_tx +from .util import satoshi_round # Create a block (with regtest difficulty) def create_block(hashprev, coinbase, nTime=None): block = CBlock() if nTime is None: import time block.nTime = int(time.time() + 600) else: block.nTime = nTime block.hashPrevBlock = hashprev block.nBits = 0x207fffff # Will break after a difficulty adjustment... block.vtx.append(coinbase) block.hashMerkleRoot = block.calc_merkle_root() block.calc_sha256() return block def make_conform_to_ctor(block): for tx in block.vtx: pad_tx(tx) tx.rehash() block.vtx = [block.vtx[0]] + \ sorted(block.vtx[1:], key=lambda tx: tx.get_id()) def serialize_script_num(value): r = bytearray(0) if value == 0: return r neg = value < 0 absvalue = -value if neg else value while (absvalue): r.append(int(absvalue & 0xff)) absvalue >>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return r # Create a coinbase transaction, assuming no miner fees. # If pubkey is passed in, the coinbase output will be a P2PK output; # otherwise an anyone-can-spend output. 
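# A usage sketch (the regtest halving interval below is 150 blocks):
#   cb = create_coinbase(1)               # 50 * COIN to an OP_TRUE output
#   cb = create_coinbase(151, pubkey=pk)  # 25 * COIN to a P2PK output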
def create_coinbase(height, pubkey=None): coinbase = CTransaction() coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), ser_string(serialize_script_num(height)), 0xffffffff)) coinbaseoutput = CTxOut() coinbaseoutput.nValue = 50 * COIN halvings = int(height / 150) # regtest coinbaseoutput.nValue >>= halvings if (pubkey != None): coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) else: coinbaseoutput.scriptPubKey = CScript([OP_TRUE]) coinbase.vout = [coinbaseoutput] # Make sure the coinbase is at least 100 bytes pad_tx(coinbase) coinbase.calc_sha256() return coinbase # Create a transaction. # If the scriptPubKey is not specified, make it anyone-can-spend. def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()): tx = CTransaction() assert(n < len(prevtx.vout)) tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff)) tx.vout.append(CTxOut(value, scriptPubKey)) pad_tx(tx) tx.calc_sha256() return tx def get_legacy_sigopcount_block(block, fAccurate=True): count = 0 for tx in block.vtx: count += get_legacy_sigopcount_tx(tx, fAccurate) return count def get_legacy_sigopcount_tx(tx, fAccurate=True): count = 0 for i in tx.vout: count += i.scriptPubKey.GetSigOpCount(fAccurate) for j in tx.vin: # scriptSig might be of type bytes, so convert to CScript for the moment count += CScript(j.scriptSig).GetSigOpCount(fAccurate) return count def create_confirmed_utxos(node, count, age=101): """ Helper to create at least "count" utxos """ to_generate = int(0.5 * count) + age while to_generate > 0: node.generate(min(25, to_generate)) to_generate -= 25 utxos = node.listunspent() iterations = count - len(utxos) addr1 = node.getnewaddress() addr2 = node.getnewaddress() if iterations <= 0: return utxos for i in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) outputs = {} outputs[addr1] = satoshi_round(t['amount'] / 2) outputs[addr2] = satoshi_round(t['amount'] / 2) raw_tx = node.createrawtransaction(inputs, outputs) ctx = FromHex(CTransaction(), raw_tx) fee = node.calculate_fee(ctx) // 2 ctx.vout[0].nValue -= fee # Due to possible truncation, we go ahead and take another satoshi in # fees to ensure the transaction gets through ctx.vout[1].nValue -= fee + 1 signed_tx = node.signrawtransaction(ToHex(ctx))["hex"] node.sendrawtransaction(signed_tx) while (node.getmempoolinfo()['size'] > 0): node.generate(1) utxos = node.listunspent() assert(len(utxos) >= count) return utxos def mine_big_block(node, utxos=None): # generate a 66k transaction, # and 14 of them is close to the 1MB block limit num = 14 utxos = utxos if utxos is not None else [] if len(utxos) < num: utxos.clear() utxos.extend(node.listunspent()) send_big_transactions(node, utxos, num, 100) node.generate(1) def send_big_transactions(node, utxos, num, fee_multiplier): from .cashaddr import decode txids = [] padding = "1"*(512*127) addrHash = decode(node.getnewaddress())[2] for _ in range(num): ctx = CTransaction() utxo = utxos.pop() txid = int(utxo['txid'], 16) ctx.vin.append(CTxIn(COutPoint(txid, int(utxo["vout"])), b"")) ctx.vout.append(CTxOut(0, CScript( [OP_RETURN, OP_PUSHDATA2, len(padding), bytes(padding, 'utf-8')]))) ctx.vout.append( CTxOut(int(satoshi_round(utxo['amount']*COIN)), CScript([OP_DUP, OP_HASH160, addrHash, OP_EQUALVERIFY, OP_CHECKSIG]))) # Create a proper fee for the transaction to be mined ctx.vout[1].nValue -= int(fee_multiplier * node.calculate_fee(ctx)) signresult = node.signrawtransaction( ToHex(ctx), None, None, "NONE|FORKID") txid = 
node.sendrawtransaction(signresult["hex"], True) txids.append(txid) return txids diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py index a2187191d..7a98ed1bd 100755 --- a/test/functional/test_framework/comptool.py +++ b/test/functional/test_framework/comptool.py @@ -1,429 +1,443 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Compare two or more bitcoinds to each other. To use, create a class that implements get_tests(), and pass it in as the test generator to TestManager. get_tests() should be a python generator that returns TestInstance objects. See below for definition. TestNode behaves as follows: Configure with a BlockStore and TxStore on_inv: log the message but don't request on_headers: log the chain tip on_pong: update ping response map (for synchronization) on_getheaders: provide headers via BlockStore on_getdata: provide blocks via BlockStore """ -from .mininode import * +import logging + from .blockstore import BlockStore, TxStore +from .messages import ( + CBlock, + CBlockHeader, + CInv, + CTransaction, + MAX_INV_SZ, + msg_block, + msg_getheaders, + msg_headers, + msg_inv, + msg_mempool, + msg_ping, +) +from .mininode import mininode_lock, P2PInterface from .util import p2p_port, wait_until -import logging logger = logging.getLogger("TestFramework.comptool") global mininode_lock class RejectResult(): """Outcome that expects rejection of a transaction or block.""" def __init__(self, code, reason=b''): self.code = code self.reason = reason def match(self, other): if self.code != other.code: return False return other.reason.startswith(self.reason) def __repr__(self): return '{}:{}'.format(self.code, self.reason or '*') class TestNode(P2PInterface): def __init__(self, block_store, tx_store): super().__init__() self.bestblockhash = None self.block_store = block_store self.block_request_map = {} self.tx_store = tx_store self.tx_request_map = {} self.block_reject_map = {} self.tx_reject_map = {} # When the pingmap is non-empty we're waiting for # a response self.pingMap = {} self.lastInv = [] self.closed = False def on_close(self): self.closed = True def on_headers(self, message): if len(message.headers) > 0: best_header = message.headers[-1] best_header.calc_sha256() self.bestblockhash = best_header.sha256 def on_getheaders(self, message): response = self.block_store.headers_for( message.locator, message.hashstop) if response is not None: self.send_message(response) def on_getdata(self, message): [self.send_message(r) for r in self.block_store.get_blocks(message.inv)] [self.send_message(r) for r in self.tx_store.get_transactions(message.inv)] for i in message.inv: if i.type == 1: self.tx_request_map[i.hash] = True elif i.type == 2: self.block_request_map[i.hash] = True def on_inv(self, message): self.lastInv = [x.hash for x in message.inv] def on_pong(self, message): try: del self.pingMap[message.nonce] except KeyError: raise AssertionError( "Got pong for unknown ping [{}]".format(repr(message))) def on_reject(self, message): if message.message == b'tx': self.tx_reject_map[message.data] = RejectResult( message.code, message.reason) if message.message == b'block': self.block_reject_map[message.data] = RejectResult( message.code, message.reason) def send_inv(self, obj): mtype = 2 if isinstance(obj, CBlock) else 1 self.send_message(msg_inv([CInv(mtype, obj.sha256)])) 
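    # send_inv() derives the inv type from the object's class, e.g.:
    #   node.send_inv(block)  # -> msg_inv([CInv(2, block.sha256)]) (MSG_BLOCK)
    #   node.send_inv(tx)     # -> msg_inv([CInv(1, tx.sha256)]) (MSG_TX)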
def send_getheaders(self): # We ask for headers from their last tip. m = msg_getheaders() m.locator = self.block_store.get_locator(self.bestblockhash) self.send_message(m) def send_header(self, header): m = msg_headers() m.headers.append(header) self.send_message(m) # This assumes BIP31 def send_ping(self, nonce): self.pingMap[nonce] = True self.send_message(msg_ping(nonce)) def received_ping_response(self, nonce): return nonce not in self.pingMap def send_mempool(self): self.lastInv = [] self.send_message(msg_mempool()) # TestInstance: # # Instances of these are generated by the test generator, and fed into the # comptool. # # "blocks_and_transactions" should be an array of # [obj, True/False/None, hash/None]: # - obj is either a CBlock, CBlockHeader, or a CTransaction, and # - the second value indicates whether the object should be accepted # into the blockchain or mempool (for tests where we expect a certain # answer), or "None" if we don't expect a certain answer and are just # comparing the behavior of the nodes being tested. # - the third value is the hash to test the tip against (if None or omitted, # use the hash of the block) # - NOTE: if a block header, no test is performed; instead the header is # just added to the block_store. This is to facilitate block delivery # when communicating with headers-first clients (when withholding an # intermediate block). # sync_every_block: if True, then each block will be inv'ed, synced, and # nodes will be tested based on the outcome for the block. If False, # then inv's accumulate until all blocks are processed (or max inv size # is reached) and then sent out in one inv message. Then the final block # will be synced across all connections, and the outcome of the final # block will be tested. # sync_every_tx: analogous to behavior for sync_every_block, except if outcome # on the final tx is None, then contents of entire mempool are compared # across all connections. (If outcome of final tx is specified as true # or false, then only the last tx is tested against outcome.) class TestInstance(): def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False): self.blocks_and_transactions = objects if objects else [] self.sync_every_block = sync_every_block self.sync_every_tx = sync_every_tx class TestManager(): def __init__(self, testgen, datadir): self.test_generator = testgen self.p2p_connections = [] self.block_store = BlockStore(datadir) self.tx_store = TxStore(datadir) self.ping_counter = 1 def add_all_connections(self, nodes): for i in range(len(nodes)): # Create a p2p connection to each node node = TestNode(self.block_store, self.tx_store) node.peer_connect('127.0.0.1', p2p_port(i)) self.p2p_connections.append(node) def clear_all_connections(self): self.p2p_connections = [] def wait_for_disconnections(self): def disconnected(): return all(node.closed for node in self.p2p_connections) wait_until(disconnected, timeout=10, lock=mininode_lock) def wait_for_verack(self): return all(node.wait_for_verack() for node in self.p2p_connections) def wait_for_pings(self, counter): def received_pongs(): return all(node.received_ping_response(counter) for node in self.p2p_connections) wait_until(received_pongs, lock=mininode_lock) # sync_blocks: Wait for all connections to request the blockhash given # then send get_headers to find out the tip of each node, and synchronize # the response by using a ping (and waiting for pong with same nonce). 
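    # Because the node handles P2P messages in order, a pong carrying our
    # nonce implies that everything sent before the matching ping (including
    # the getheaders above) has already been processed.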
def sync_blocks(self, blockhash, num_blocks): def blocks_requested(): return all( blockhash in node.block_request_map and node.block_request_map[blockhash] for node in self.p2p_connections ) # --> error if not requested wait_until(blocks_requested, attempts=20 * num_blocks, lock=mininode_lock) # Send getheaders message [c.send_getheaders() for c in self.p2p_connections] # Send ping and wait for response -- synchronization hack [c.send_ping(self.ping_counter) for c in self.p2p_connections] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 # Analogous to sync_block (see above) def sync_transaction(self, txhash, num_events): # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events) def transaction_requested(): return all( txhash in node.tx_request_map and node.tx_request_map[txhash] for node in self.p2p_connections ) # --> error if not requested wait_until(transaction_requested, attempts=20 * num_events, lock=mininode_lock) # Get the mempool [c.send_mempool() for c in self.p2p_connections] # Send ping and wait for response -- synchronization hack [c.send_ping(self.ping_counter) for c in self.p2p_connections] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 # Sort inv responses from each node with mininode_lock: [c.lastInv.sort() for c in self.p2p_connections] # Verify that the tip of each connection all agree with each other, and # with the expected outcome (if given) def check_results(self, blockhash, outcome): with mininode_lock: for c in self.p2p_connections: if outcome is None: if c.bestblockhash != self.p2p_connections[0].bestblockhash: return False # Check that block was rejected w/ code elif isinstance(outcome, RejectResult): if c.bestblockhash == blockhash: return False if blockhash not in c.block_reject_map: logger.error( 'Block not in reject map: {:064x}'.format(blockhash)) return False if not outcome.match(c.block_reject_map[blockhash]): logger.error('Block rejected with {} instead of expected {}: {:064x}'.format( c.block_reject_map[blockhash], outcome, blockhash)) return False elif ((c.bestblockhash == blockhash) != outcome): return False return True # Either check that the mempools all agree with each other, or that # txhash's presence in the mempool matches the outcome specified. # This is somewhat of a strange comparison, in that we're either comparing # a particular tx to an outcome, or the entire mempools altogether; # perhaps it would be useful to add the ability to check explicitly that # a particular tx's existence in the mempool is the same across all nodes. def check_mempool(self, txhash, outcome): with mininode_lock: for c in self.p2p_connections: if outcome is None: # Make sure the mempools agree with each other if c.lastInv != self.p2p_connections[0].lastInv: return False # Check that tx was rejected w/ code elif isinstance(outcome, RejectResult): if txhash in c.lastInv: return False if txhash not in c.tx_reject_map: logger.error( 'Tx not in reject map: {:064x}'.format(txhash)) return False if not outcome.match(c.tx_reject_map[txhash]): logger.error('Tx rejected with {} instead of expected {}: {:064x}'.format( c.tx_reject_map[txhash], outcome, txhash)) return False elif ((txhash in c.lastInv) != outcome): return False return True def run(self): # Wait until verack is received self.wait_for_verack() test_number = 1 for test_instance in self.test_generator.get_tests(): # We use these variables to keep track of the last block # and last transaction in the tests, which are used # if we're not syncing on every block or every tx. 
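            # e.g. for TestInstance([[b1, True], [b2, True]],
            # sync_every_block=False), both blocks go out in one batched inv,
            # and only b2 (the final tip) is checked in the final sync below.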
[block, block_outcome, tip] = [None, None, None] [tx, tx_outcome] = [None, None] invqueue = [] for test_obj in test_instance.blocks_and_transactions: b_or_t = test_obj[0] outcome = test_obj[1] # Determine if we're dealing with a block or tx if isinstance(b_or_t, CBlock): # Block test runner block = b_or_t block_outcome = outcome tip = block.sha256 # each test_obj can have an optional third argument # to specify the tip we should compare with # (default is to use the block being tested) if len(test_obj) >= 3: tip = test_obj[2] # Add to shared block_store, set as current block # If there was an open getdata request for the block # previously, and we didn't have an entry in the # block_store, then immediately deliver, because the # node wouldn't send another getdata request while # the earlier one is outstanding. first_block_with_hash = True if self.block_store.get(block.sha256) is not None: first_block_with_hash = False with mininode_lock: self.block_store.add_block(block) for c in self.p2p_connections: if first_block_with_hash and block.sha256 in c.block_request_map and c.block_request_map[block.sha256] == True: # There was a previous request for this block hash # Most likely, we delivered a header for this block # but never had the block to respond to the getdata c.send_message(msg_block(block)) else: c.block_request_map[block.sha256] = False # Either send inv's to each node and sync, or add # to invqueue for later inv'ing. if (test_instance.sync_every_block): # if we expect success, send inv and sync every block # if we expect failure, just push the block and see what happens. if outcome == True: [c.send_inv(block) for c in self.p2p_connections] self.sync_blocks(block.sha256, 1) else: [c.send_message(msg_block(block)) for c in self.p2p_connections] [c.send_ping(self.ping_counter) for c in self.p2p_connections] self.wait_for_pings(self.ping_counter) self.ping_counter += 1 if (not self.check_results(tip, outcome)): raise AssertionError( "Test failed at test {}".format(test_number)) else: invqueue.append(CInv(2, block.sha256)) elif isinstance(b_or_t, CBlockHeader): block_header = b_or_t self.block_store.add_header(block_header) [c.send_header(block_header) for c in self.p2p_connections] else: # Tx test runner assert(isinstance(b_or_t, CTransaction)) tx = b_or_t tx_outcome = outcome # Add to shared tx store and clear map entry with mininode_lock: self.tx_store.add_transaction(tx) for c in self.p2p_connections: c.tx_request_map[tx.sha256] = False # Again, either inv to all nodes or save for later if (test_instance.sync_every_tx): [c.send_inv(tx) for c in self.p2p_connections] self.sync_transaction(tx.sha256, 1) if (not self.check_mempool(tx.sha256, outcome)): raise AssertionError( "Test failed at test {}".format(test_number)) else: invqueue.append(CInv(1, tx.sha256)) # Ensure we're not overflowing the inv queue if len(invqueue) == MAX_INV_SZ: [c.send_message(msg_inv(invqueue)) for c in self.p2p_connections] invqueue = [] # Do final sync if we weren't syncing on every block or every tx. 
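            # Any CInv entries still queued (fewer than MAX_INV_SZ accumulated
            # above) are flushed in a single msg_inv per connection before the
            # final checks.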
            if (not test_instance.sync_every_block and block is not None):
                if len(invqueue) > 0:
                    [c.send_message(msg_inv(invqueue))
                     for c in self.p2p_connections]
                    invqueue = []
                self.sync_blocks(block.sha256, len(
                    test_instance.blocks_and_transactions))
                if (not self.check_results(tip, block_outcome)):
                    raise AssertionError(
                        "Block test failed at test {}".format(test_number))
            if (not test_instance.sync_every_tx and tx is not None):
                if len(invqueue) > 0:
                    [c.send_message(msg_inv(invqueue))
                     for c in self.p2p_connections]
                    invqueue = []
                self.sync_transaction(tx.sha256, len(
                    test_instance.blocks_and_transactions))
                if (not self.check_mempool(tx.sha256, tx_outcome)):
                    raise AssertionError(
                        "Mempool test failed at test {}".format(test_number))
            logger.info("Test {}: PASS".format(test_number))
            test_number += 1

        [c.disconnect_node() for c in self.p2p_connections]
        self.wait_for_disconnections()
        self.block_store.close()
        self.tx_store.close()
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index fe2e1979e..d1693ccf3 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -1,1218 +1,1218 @@
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message structures

CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
    data structures that should map to corresponding structures in
    bitcoin/primitives

msg_block, msg_tx, msg_headers, etc.:
    data structures that represent network messages

ser_*, deser_*: functions that handle serialization/deserialization."""

from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time

from test_framework.siphash import siphash256
-from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
+from test_framework.util import bytes_to_hex_str, hex_str_to_bytes

MIN_VERSION_SUPPORTED = 60001
# past bip-31 for ping/pong
MY_VERSION = 70014
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
# from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MY_RELAY = 1

MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000

# 1 BCH in satoshis
COIN = 100000000

NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# NODE_WITNESS = (1 << 3)
NODE_XTHIN = (1 << 4)
NODE_BITCOIN_CASH = (1 << 5)
NODE_NETWORK_LIMITED = (1 << 10)

# How much data will be read from the network at once
READ_BUFFER_SIZE = 8192

# Serialization/deserialization tools


def sha256(s):
    return hashlib.new('sha256', s).digest()


def ripemd160(s):
    return hashlib.new('ripemd160', s).digest()


def hash256(s):
    return sha256(sha256(s))


def ser_compact_size(l):
    r = b""
    if l < 253:
        r = struct.pack("B", l)
    elif l < 0x10000:
        r = struct.pack("<BH", 253, l)
    elif l < 0x100000000:
        r = struct.pack("<BI", 254, l)
    else:
        r = struct.pack("<BQ", 255, l)
    return r


def deser_compact_size(f):
    nit = struct.unpack("<B", f.read(1))[0]
    if nit == 253:
        nit = struct.unpack("<H", f.read(2))[0]
    elif nit == 254:
        nit = struct.unpack("<I", f.read(4))[0]
    elif nit == 255:
        nit = struct.unpack("<Q", f.read(8))[0]
    return nit


def deser_string(f):
    nit = deser_compact_size(f)
    return f.read(nit)


def ser_string(s):
    return ser_compact_size(len(s)) + s


def deser_uint256(f):
    r = 0
    for i in range(8):
        t = struct.unpack("<I", f.read(4))[0]
        r += t << (i * 32)
    return r


def ser_uint256(u):
    rs = b""
    for i in range(8):
        rs += struct.pack("<I", u & 0xFFFFFFFF)
        u >>= 32
    return rs


def uint256_from_str(s):
    r = 0
    t = struct.unpack("<IIIIIIII", s[:32])
    for i in range(8):
        r += t[i] << (i * 32)
    return r


def uint256_from_compact(c):
    nbytes = (c >> 24) & 0xFF
    v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
    return v


def deser_vector(f, c):
    nit = deser_compact_size(f)
    r = []
    for i in range(nit):
        t = c()
        t.deserialize(f)
        r.append(t)
    return r


# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector.
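# e.g. ser_vector(block.vtx) calls each entry's serialize(); passing a method
# name such as "serialize_without_witness" (hypothetical here) would invoke
# that method on each entry instead.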
def ser_vector(l, ser_function_name=None): r = ser_compact_size(len(l)) for i in l: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(l): r = ser_compact_size(len(l)) for i in l: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_string(f) r.append(t) return r def ser_string_vector(l): r = ser_compact_size(len(l)) for sv in l: r += ser_string(sv) return r def deser_int_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = struct.unpack("H", f.read(2))[0] def serialize(self, with_time=True): r = b"" if with_time: r += struct.pack("H", self.port) return r def __repr__(self): return "CAddress(nServices={} ip={} port={})".format( self.nServices, self.ip, self.port) class CInv(): typemap = { 0: "Error", 1: "TX", 2: "Block", 4: "CompactBlock" } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack(" 21000000 * COIN: return False return True def __repr__(self): return "CTransaction(nVersion={} vin={} vout={} nLockTime={})".format( self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) class CBlockHeader(): def __init__(self, header=None): if header is None: self.set_null() else: self.nVersion = header.nVersion self.hashPrevBlock = header.hashPrevBlock self.hashMerkleRoot = header.hashMerkleRoot self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce self.sha256 = header.sha256 self.hash = header.hash self.calc_sha256() def set_null(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack(" 1: newhashes = [] for i in range(0, len(hashes), 2): i2 = min(i + 1, len(hashes) - 1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = newhashes return uint256_from_str(hashes[0]) def calc_merkle_root(self): hashes = [] for tx in self.vtx: tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) return self.get_merkle_root(hashes) def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.sha256 > target: return False for tx in self.vtx: if not tx.is_valid(): return False if self.calc_merkle_root() != self.hashMerkleRoot: return False return True def solve(self): self.rehash() target = uint256_from_compact(self.nBits) while self.sha256 > target: self.nNonce += 1 self.rehash() def __repr__(self): return "CBlock(nVersion={} hashPrevBlock={:064x} hashMerkleRoot={:064x} nTime={} nBits={:08x} nNonce={:08x} vtx={})".format( self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx)) class PrefilledTransaction(): def __init__(self, index=0, tx=None): self.index = index self.tx = tx def deserialize(self, f): self.index = deser_compact_size(f) self.tx = CTransaction() self.tx.deserialize(f) def serialize(self): r = b"" r += ser_compact_size(self.index) r += self.tx.serialize() return r def __repr__(self): return "PrefilledTransaction(index={}, tx={})".format( self.index, repr(self.tx)) # This is what we send on the wire, in a cmpctblock message. 
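# (BIP152: the full block header plus a nonce, 6-byte siphash-derived short
# IDs for most transactions, and a few prefilled ones such as the coinbase.)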
class P2PHeaderAndShortIDs(): def __init__(self): self.header = CBlockHeader() self.nonce = 0 self.shortids_length = 0 self.shortids = [] self.prefilled_txn_length = 0 self.prefilled_txn = [] def deserialize(self, f): self.header.deserialize(f) self.nonce = struct.unpack("= 106: self.addrFrom = CAddress() self.addrFrom.deserialize(f, False) self.nNonce = struct.unpack("= 209: self.nStartingHeight = struct.unpack("= 70001: # Relay field is optional for version 70001 onwards try: self.nRelay = struct.unpack(" class msg_headers(): command = b"headers" def __init__(self, headers=None): self.headers = headers if headers is not None else [] def deserialize(self, f): # comment in bitcoind indicates these should be deserialized as blocks blocks = deser_vector(f, CBlock) for x in blocks: self.headers.append(CBlockHeader(x)) def serialize(self): blocks = [CBlock(x) for x in self.headers] return ser_vector(blocks) def __repr__(self): return "msg_headers(headers={})".format(repr(self.headers)) class msg_reject(): command = b"reject" REJECT_MALFORMED = 1 def __init__(self): self.message = b"" self.code = 0 self.reason = b"" self.data = 0 def deserialize(self, f): self.message = deser_string(f) self.code = struct.unpack(" 0: self.recvbuf += t while True: msg = self._on_data() if msg == None: break self.on_message(msg) def _on_data(self): """Try to read P2P messages from the recv buffer. This method reads data from the buffer in a loop. It deserializes, parses and verifies the P2P header, then passes the P2P payload to the on_message callback for processing.""" try: with mininode_lock: if len(self.recvbuf) < 4: return None if self.recvbuf[:4] != MAGIC_BYTES[self.network]: raise ValueError( "got garbage {}".format(repr(self.recvbuf))) if len(self.recvbuf) < 4 + 12 + 4 + 4: return command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] msglen = struct.unpack(" 0 or pre_connection) def handle_write(self): """asyncore callback when data should be written to the socket.""" with mininode_lock: # asyncore does not expose socket connection, only the first read/write # event, thus we must check connection manually here to know when we # actually connect if self.state == "connecting": self.handle_connect() if not self.writable(): return try: sent = self.send(self.sendbuf) except: self.handle_close() return self.sendbuf = self.sendbuf[sent:] def format_message(self, message): command = message.command data = message.serialize() tmsg = MAGIC_BYTES[self.network] tmsg += command tmsg += b"\x00" * (12 - len(command)) tmsg += struct.pack(" 500: log_message += "... (msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): """A high-level P2P interface class for communicating with a Bitcoin Cash node. This class provides high-level callbacks for processing P2P message payloads, as well as convenience methods for interacting with the node over P2P. 
Individual testcases should subclass this and override the on_* methods if they want to alter message handling behaviour.""" def __init__(self): super().__init__() # Track number of messages of each type received and the most recent # message of each type self.message_count = defaultdict(int) self.last_message = {} # A count of the number of ping messages we've sent to the node self.ping_counter = 1 # The network services received from the peer self.nServices = 0 def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs): super().peer_connect(*args, **kwargs) if send_version: # Send a version msg vt = msg_version() vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 self.send_message(vt, True) # Message receiving methods def on_message(self, message): """Receive message and dispatch message to appropriate callback. We keep a count of how many of each message type has been received and the most recent message of each type.""" with mininode_lock: try: command = message.command.decode('ascii') self.message_count[command] += 1 self.last_message[command] = message getattr(self, 'on_' + command)(message) except: print("ERROR delivering {} ({})".format( repr(message), sys.exc_info()[0])) raise # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour. def on_open(self): pass def on_close(self): pass def on_addr(self, message): pass def on_block(self, message): pass def on_blocktxn(self, message): pass def on_cmpctblock(self, message): pass def on_feefilter(self, message): pass def on_getaddr(self, message): pass def on_getblocks(self, message): pass def on_getblocktxn(self, message): pass def on_getdata(self, message): pass def on_getheaders(self, message): pass def on_headers(self, message): pass def on_mempool(self, message): pass def on_pong(self, message): pass def on_reject(self, message): pass def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass def on_inv(self, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): self.send_message(want) def on_ping(self, message): self.send_message(msg_pong(message.nonce)) def on_verack(self, message): self.verack_received = True def on_version(self, message): assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. 
Test framework only supports versions greater than {}".format( message.nVersion, MIN_VERSION_SUPPORTED) self.send_message(msg_verack()) self.nServices = message.nServices # Connection helper methods def wait_for_disconnect(self, timeout=60): def test_function(): return self.state != "connected" wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message receiving helper methods def wait_for_block(self, blockhash, timeout=60): def test_function(): return self.last_message.get( "block") and self.last_message["block"].block.rehash() == blockhash wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getdata(self, timeout=60): def test_function(): return self.last_message.get("getdata") wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_getheaders(self, timeout=60): def test_function(): return self.last_message.get("getheaders") wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_inv(self, expected_inv, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError( "wait_for_inv() will only verify the first inv object") def test_function(): return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash wait_until(test_function, timeout=timeout, lock=mininode_lock) def wait_for_verack(self, timeout=60): def test_function(): return self.message_count["verack"] wait_until(test_function, timeout=timeout, lock=mininode_lock) # Message sending helper functions def send_and_ping(self, message): self.send_message(message) self.sync_with_ping() # Sync up with the node def sync_with_ping(self, timeout=60): self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): if not self.last_message.get("pong"): return False return self.last_message["pong"].nonce == self.ping_counter wait_until(test_function, timeout=timeout, lock=mininode_lock) self.ping_counter += 1 # Keep our own socket map for asyncore, so that we can track disconnects # ourselves (to workaround an issue with closing an asyncore socket when # using select) mininode_socket_map = dict() # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, # P2PConnection acquires this lock whenever delivering a message to a P2PInterface, # and whenever adding anything to the send buffer (in send_message()). This # lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. 
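# A typical pattern on the test-logic side, assuming a connected
# P2PInterface p2p:
#   with mininode_lock:
#       received = p2p.last_message.get("getdata")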
mininode_lock = threading.RLock() class NetworkThread(threading.Thread): def __init__(self): super().__init__(name="NetworkThread") def run(self): while mininode_socket_map: # We check for whether to disconnect outside of the asyncore # loop to workaround the behavior of asyncore when using # select disconnected = [] for fd, obj in mininode_socket_map.items(): if obj.disconnect: disconnected.append(obj) [obj.handle_close() for obj in disconnected] asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) logger.debug("Network thread closing") def network_thread_start(): """Start the network thread.""" # Only one network thread may run at a time assert not network_thread_running() NetworkThread().start() def network_thread_running(): """Return whether the network thread is running.""" return any([thread.name == "NetworkThread" for thread in threading.enumerate()]) def network_thread_join(timeout=10): """Wait timeout seconds for the network thread to terminate. Throw if the network thread doesn't terminate in timeout seconds.""" network_threads = [ thread for thread in threading.enumerate() if thread.name == "NetworkThread"] assert len(network_threads) <= 1 for thread in network_threads: thread.join(timeout) assert not thread.is_alive() diff --git a/test/functional/test_framework/netutil.py b/test/functional/test_framework/netutil.py index 6058b1600..6343eac3b 100644 --- a/test/functional/test_framework/netutil.py +++ b/test/functional/test_framework/netutil.py @@ -1,168 +1,168 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Linux network utilities. Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal """ -import sys -import socket -import fcntl -import struct import array -import os from binascii import unhexlify, hexlify +import fcntl +import os +import socket +import struct +import sys # STATE_ESTABLISHED = '01' # STATE_SYN_SENT = '02' # STATE_SYN_RECV = '03' # STATE_FIN_WAIT1 = '04' # STATE_FIN_WAIT2 = '05' # STATE_TIME_WAIT = '06' # STATE_CLOSE = '07' # STATE_CLOSE_WAIT = '08' # STATE_LAST_ACK = '09' STATE_LISTEN = '0A' # STATE_CLOSING = '0B' def get_socket_inodes(pid): ''' Get list of socket inodes for process pid. ''' base = '/proc/{}/fd'.format(pid) inodes = [] for item in os.listdir(base): target = os.readlink(os.path.join(base, item)) if target.startswith('socket:'): inodes.append(int(target[8:-1])) return inodes def _remove_empty(array): return [x for x in array if x != ''] def _convert_ip_port(array): host, port = array.split(':') # convert host from mangled-per-four-bytes form as used by kernel host = unhexlify(host) host_out = '' for x in range(0, len(host) // 4): (val,) = struct.unpack('=I', host[x * 4:(x + 1) * 4]) host_out += '{:08x}'.format(val) return host_out, int(port, 16) def netstat(typ='tcp'): ''' Function to return a list with status of tcp connections at linux systems To get pid of all network process running on system, you must run this script as superuser ''' with open('/proc/net/' + typ, 'r', encoding='utf8') as f: content = f.readlines() content.pop(0) result = [] for line in content: # Split lines and remove empty spaces. 
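        # A /proc/net/tcp row looks roughly like
        #   '0: 0100007F:1A89 00000000:0000 0A ... 12345 ...'
        # where index 1 is the hex local address:port, index 3 the state
        # (0A = LISTEN) and index 9 the socket inode used to match the pid.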
line_array = _remove_empty(line.split(' ')) tcp_id = line_array[0] l_addr = _convert_ip_port(line_array[1]) r_addr = _convert_ip_port(line_array[2]) state = line_array[3] # Need the inode to match with process pid. inode = int(line_array[9]) nline = [tcp_id, l_addr, r_addr, state, inode] result.append(nline) return result def get_bind_addrs(pid): ''' Get bind addresses as (host,port) tuples for process pid. ''' inodes = get_socket_inodes(pid) bind_addrs = [] for conn in netstat('tcp') + netstat('tcp6'): if conn[3] == STATE_LISTEN and conn[4] in inodes: bind_addrs.append(conn[1]) return bind_addrs # from: http://code.activestate.com/recipes/439093/ def all_interfaces(): ''' Return all interfaces that are up ''' is_64bits = sys.maxsize > 2**32 struct_size = 40 if is_64bits else 32 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) max_possible = 8 # initial value while True: bytes = max_possible * struct_size names = array.array('B', b'\0' * bytes) outbytes = struct.unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, # SIOCGIFCONF struct.pack('iL', bytes, names.buffer_info()[0]) ))[0] if outbytes == bytes: max_possible *= 2 else: break namestr = names.tobytes() return [(namestr[i:i + 16].split(b'\0', 1)[0], socket.inet_ntoa(namestr[i + 20:i + 24])) for i in range(0, outbytes, struct_size)] def addr_to_hex(addr): ''' Convert string IPv4 or IPv6 address to binary address as returned by get_bind_addrs. Very naive implementation that certainly doesn't work for all IPv6 variants. ''' if '.' in addr: # IPv4 addr = [int(x) for x in addr.split('.')] elif ':' in addr: # IPv6 sub = [[], []] # prefix, suffix x = 0 addr = addr.split(':') for i, comp in enumerate(addr): if comp == '': # skip empty component at beginning or end if i == 0 or i == (len(addr) - 1): continue x += 1 # :: skips to suffix assert(x < 2) else: # two bytes per component val = int(comp, 16) sub[x].append(val >> 8) sub[x].append(val & 0xff) nullbytes = 16 - len(sub[0]) - len(sub[1]) assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) addr = sub[0] + ([0] * nullbytes) + sub[1] else: raise ValueError('Could not parse address {}'.format(addr)) return hexlify(bytearray(addr)).decode('ascii') def test_ipv6_local(): ''' Check for (local) IPv6 support. ''' import socket # By using SOCK_DGRAM this will not actually make a connection, but it will # fail if there is no route to IPv6 localhost. have_ipv6 = True try: s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) s.connect(('::1', 0)) except socket.error: have_ipv6 = False return have_ipv6 diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index 434028625..277159f76 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -1,725 +1,734 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Functionality to build scripts, as well as SignatureHash(). This file is modified from python-bitcoinlib. 
""" -from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string from .bignum import bn2vch from binascii import hexlify import hashlib import struct - import sys + +from .messages import ( + CTransaction, + CTxOut, + hash256, + ser_string, + ser_uint256, + sha256, + uint256_from_str, +) + bchr = chr bord = ord if sys.version > '3': long = int def bchr(x): return bytes([x]) def bord(x): return x MAX_SCRIPT_ELEMENT_SIZE = 520 OPCODE_NAMES = {} def hash160(s): return hashlib.new('ripemd160', sha256(s)).digest() _opcode_instances = [] class CScriptOp(int): """A single script opcode""" __slots__ = [] @staticmethod def encode_op_pushdata(d): """Encode a PUSHDATA op, returning bytes""" if len(d) < 0x4c: return b'' + bchr(len(d)) + d # OP_PUSHDATA elif len(d) <= 0xff: return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1 elif len(d) <= 0xffff: return b'\x4d' + struct.pack(b'>= 8 if r[-1] & 0x80: r.append(0x80 if neg else 0) elif neg: r[-1] |= 0x80 return bytes(bchr(len(r)) + r) class CScript(bytes): """Serialized script A bytes subclass, so you can use this directly whenever bytes are accepted. Note that this means that indexing does *not* work - you'll get an index by byte rather than opcode. This format was chosen for efficiency so that the general case would not require creating a lot of little CScriptOP objects. iter(script) however does iterate by opcode. """ @classmethod def __coerce_instance(cls, other): # Coerce other into bytes if isinstance(other, CScriptOp): other = bchr(other) elif isinstance(other, CScriptNum): if (other.value == 0): other = bchr(CScriptOp(OP_0)) else: other = CScriptNum.encode(other) elif isinstance(other, int): if 0 <= other <= 16: other = bytes(bchr(CScriptOp.encode_op_n(other))) elif other == -1: other = bytes(bchr(OP_1NEGATE)) else: other = CScriptOp.encode_op_pushdata(bn2vch(other)) elif isinstance(other, (bytes, bytearray)): other = CScriptOp.encode_op_pushdata(other) return other def __add__(self, other): # Do the coercion outside of the try block so that errors in it are # noticed. other = self.__coerce_instance(other) try: # bytes.__add__ always returns bytes instances unfortunately return CScript(super(CScript, self).__add__(other)) except TypeError: raise TypeError( 'Can not add a {!r} instance to a CScript'.format(other.__class__)) def join(self, iterable): # join makes no sense for a CScript() raise NotImplementedError def __new__(cls, value=b''): if isinstance(value, bytes) or isinstance(value, bytearray): return super(CScript, cls).__new__(cls, value) else: def coerce_iterable(iterable): for instance in iterable: yield cls.__coerce_instance(instance) # Annoyingly on both python2 and python3 bytes.join() always # returns a bytes instance even when subclassed. return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value))) def raw_iter(self): """Raw iteration Yields tuples of (opcode, data, sop_idx) so that the different possible PUSHDATA encodings can be accurately distinguished, as well as determining the exact opcode byte indexes. 
(sop_idx) """ i = 0 while i < len(self): sop_idx = i opcode = bord(self[i]) i += 1 if opcode > OP_PUSHDATA4: yield (opcode, None, sop_idx) else: datasize = None pushdata_type = None if opcode < OP_PUSHDATA1: pushdata_type = 'PUSHDATA({})'.format(opcode) datasize = opcode elif opcode == OP_PUSHDATA1: pushdata_type = 'PUSHDATA1' if i >= len(self): raise CScriptInvalidError( 'PUSHDATA1: missing data length') datasize = bord(self[i]) i += 1 elif opcode == OP_PUSHDATA2: pushdata_type = 'PUSHDATA2' if i + 1 >= len(self): raise CScriptInvalidError( 'PUSHDATA2: missing data length') datasize = bord(self[i]) + (bord(self[i + 1]) << 8) i += 2 elif opcode == OP_PUSHDATA4: pushdata_type = 'PUSHDATA4' if i + 3 >= len(self): raise CScriptInvalidError( 'PUSHDATA4: missing data length') datasize = bord(self[i]) + (bord(self[i + 1]) << 8) + \ (bord(self[i + 2]) << 16) + (bord(self[i + 3]) << 24) i += 4 else: assert False # shouldn't happen data = bytes(self[i:i + datasize]) # Check for truncation if len(data) < datasize: raise CScriptTruncatedPushDataError( '{}: truncated data'.format(pushdata_type, data)) i += datasize yield (opcode, data, sop_idx) def __iter__(self): """'Cooked' iteration Returns either a CScriptOP instance, an integer, or bytes, as appropriate. See raw_iter() if you need to distinguish the different possible PUSHDATA encodings. """ for (opcode, data, sop_idx) in self.raw_iter(): if data is not None: yield data else: opcode = CScriptOp(opcode) if opcode.is_small_int(): yield opcode.decode_op_n() else: yield CScriptOp(opcode) def __repr__(self): # For Python3 compatibility add b before strings so testcases don't # need to change def _repr(o): if isinstance(o, bytes): return "x('{}')".format(hexlify(o).decode('ascii')).encode() else: return repr(o) ops = [] i = iter(self) while True: op = None try: op = _repr(next(i)) except CScriptTruncatedPushDataError as err: op = '{}...'.format(_repr(err.data), err) break except CScriptInvalidError as err: op = ''.format(err) break except StopIteration: break finally: if op is not None: ops.append(op) return "CScript([{}])".format(', '.join(ops)) def GetSigOpCount(self, fAccurate): """Get the SigOp count. fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details. Note that this is consensus-critical. """ n = 0 lastOpcode = OP_INVALIDOPCODE for (opcode, data, sop_idx) in self.raw_iter(): if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY): n += 1 elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): if fAccurate and (OP_1 <= lastOpcode <= OP_16): n += opcode.decode_op_n() else: n += 20 lastOpcode = opcode return n SIGHASH_ALL = 1 SIGHASH_NONE = 2 SIGHASH_SINGLE = 3 SIGHASH_FORKID = 0x40 SIGHASH_ANYONECANPAY = 0x80 def FindAndDelete(script, sig): """Consensus critical, see FindAndDelete() in Satoshi codebase""" r = b'' last_sop_idx = sop_idx = 0 skip = True for (opcode, data, sop_idx) in script.raw_iter(): if not skip: r += script[last_sop_idx:sop_idx] last_sop_idx = sop_idx if script[sop_idx:sop_idx + len(sig)] == sig: skip = True else: skip = False if not skip: r += script[last_sop_idx:] return CScript(r) def SignatureHash(script, txTo, inIdx, hashtype): """Consensus-correct SignatureHash Returns (hash, err) to precisely match the consensus-critical behavior of the SIGHASH_SINGLE bug. 
(inIdx is *not* checked for validity) """ HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' if inIdx >= len(txTo.vin): return (HASH_ONE, "inIdx {} out of range ({})".format(inIdx, len(txTo.vin))) txtmp = CTransaction(txTo) for txin in txtmp.vin: txin.scriptSig = b'' txtmp.vin[inIdx].scriptSig = FindAndDelete( script, CScript([OP_CODESEPARATOR])) if (hashtype & 0x1f) == SIGHASH_NONE: txtmp.vout = [] for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 elif (hashtype & 0x1f) == SIGHASH_SINGLE: outIdx = inIdx if outIdx >= len(txtmp.vout): return (HASH_ONE, "outIdx {} out of range ({})".format(outIdx, len(txtmp.vout))) tmp = txtmp.vout[outIdx] txtmp.vout = [] for i in range(outIdx): txtmp.vout.append(CTxOut(-1)) txtmp.vout.append(tmp) for i in range(len(txtmp.vin)): if i != inIdx: txtmp.vin[i].nSequence = 0 if hashtype & SIGHASH_ANYONECANPAY: tmp = txtmp.vin[inIdx] txtmp.vin = [] txtmp.vin.append(tmp) s = txtmp.serialize() s += struct.pack(b" 0: d = s.recv(n) if not d: raise IOError('Unexpected end of stream') rv.extend(d) n -= len(d) return rv # Implementation classes class Socks5Configuration(): """Proxy configuration.""" def __init__(self): self.addr = None # Bind address (must be set) self.af = socket.AF_INET # Bind address family self.unauth = False # Support unauthenticated self.auth = False # Support authentication class Socks5Command(): """Information about an incoming socks5 command.""" def __init__(self, cmd, atyp, addr, port, username, password): self.cmd = cmd # Command (one of Command.*) self.atyp = atyp # Address type (one of AddressType.*) self.addr = addr # Address self.port = port # Port to connect to self.username = username self.password = password def __repr__(self): return 'Socks5Command({},{},{},{},{},{})'.format( self.cmd, self.atyp, self.addr, self.port, self.username, self.password) class Socks5Connection(): def __init__(self, serv, conn, peer): self.serv = serv self.conn = conn self.peer = peer def handle(self): """Handle socks5 request according to RFC192.""" try: # Verify socks version ver = recvall(self.conn, 1)[0] if ver != 0x05: raise IOError('Invalid socks version {}'.format(ver)) # Choose authentication method nmethods = recvall(self.conn, 1)[0] methods = bytearray(recvall(self.conn, nmethods)) method = None if 0x02 in methods and self.serv.conf.auth: method = 0x02 # username/password elif 0x00 in methods and self.serv.conf.unauth: method = 0x00 # unauthenticated if method is None: raise IOError('No supported authentication method was offered') # Send response self.conn.sendall(bytearray([0x05, method])) # Read authentication (optional) username = None password = None if method == 0x02: ver = recvall(self.conn, 1)[0] if ver != 0x01: raise IOError('Invalid auth packet version {}'.format(ver)) ulen = recvall(self.conn, 1)[0] username = str(recvall(self.conn, ulen)) plen = recvall(self.conn, 1)[0] password = str(recvall(self.conn, plen)) # Send authentication response self.conn.sendall(bytearray([0x01, 0x00])) # Read connect request ver, cmd, _, atyp = recvall(self.conn, 4) if ver != 0x05: raise IOError( 'Invalid socks version {} in connect request'.format(ver)) if cmd != Command.CONNECT: raise IOError( 'Unhandled command {} in connect request'.format(cmd)) if atyp == AddressType.IPV4: addr = recvall(self.conn, 4) elif atyp == AddressType.DOMAINNAME: n = recvall(self.conn, 1)[0] addr = recvall(self.conn, n) elif atyp == AddressType.IPV6: addr = 
recvall(self.conn, 16) else: raise IOError('Unknown address type {}'.format(atyp)) port_hi, port_lo = recvall(self.conn, 2) port = (port_hi << 8) | port_lo # Send dummy response self.conn.sendall( bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) cmdin = Socks5Command(cmd, atyp, addr, port, username, password) self.serv.queue.put(cmdin) logger.info('Proxy: {}'.format(cmdin)) # Fall through to disconnect except Exception as e: logger.exception("socks5 request handling failed.") self.serv.queue.put(e) finally: self.conn.close() class Socks5Server(): def __init__(self, conf): self.conf = conf self.s = socket.socket(conf.af) self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.s.bind(conf.addr) self.s.listen(5) self.running = False self.thread = None self.queue = queue.Queue() # report connections and exceptions to client def run(self): while self.running: (sockconn, peer) = self.s.accept() if self.running: conn = Socks5Connection(self, sockconn, peer) thread = threading.Thread(None, conn.handle) thread.daemon = True thread.start() def start(self): assert(not self.running) self.running = True self.thread = threading.Thread(None, self.run) self.thread.daemon = True self.thread.start() def stop(self): self.running = False # connect to self to end run loop s = socket.socket(self.conf.af) s.connect(self.conf.addr) s.close() self.thread.join() diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 81e0eccb7..a7263fbaa 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -1,519 +1,519 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Base class for RPC testing.""" +import argparse from collections import deque from enum import Enum import logging -import argparse import os import pdb import shutil import sys import tempfile import time import traceback from .authproxy import JSONRPCException from . import coverage from .test_node import TestNode from .util import ( - MAX_NODES, - PortSeed, assert_equal, check_json_precision, connect_nodes_bi, disconnect_nodes, initialize_datadir, log_filename, + MAX_NODES, p2p_port, + PortSeed, rpc_port, set_node_times, sync_blocks, sync_mempools, ) class TestStatus(Enum): PASSED = 1 FAILED = 2 SKIPPED = 3 TEST_EXIT_PASSED = 0 TEST_EXIT_FAILED = 1 TEST_EXIT_SKIPPED = 77 class BitcoinTestFramework(): """Base class for a bitcoin test script. Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods. Individual tests can also override the following methods to customize the test setup: - add_options() - setup_chain() - setup_network() - setup_nodes() The __init__() and main() methods should not be overridden. This class also contains various public and private helper methods.""" def __init__(self): """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method""" self.setup_clean_chain = False self.nodes = [] self.mocktime = 0 self.supports_cli = False def main(self): """Main function. 
This should not be overridden by the subclass test scripts.""" parser = argparse.ArgumentParser(usage="%(prog)s [options]") parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop bitcoinds after the test execution") parser.add_argument("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"), help="Source directory containing bitcoind/bitcoin-cli (default: %(default)s)") parser.add_argument("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), help="Directory for caching pregenerated datadirs") parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs") parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int, help="The seed to use for assigning port numbers (default: current process id)") parser.add_argument("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") parser.add_argument("--configfile", dest="configfile", help="Location of the test framework config file") parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true", help="Attach a python debugger if test fails") parser.add_argument("--usecli", dest="usecli", default=False, action="store_true", help="use bitcoin-cli instead of RPC for all commands") self.add_options(parser) self.options = parser.parse_args() self.set_test_params() assert hasattr( self, "num_nodes"), "Test must set self.num_nodes in set_test_params()" PortSeed.n = self.options.port_seed os.environ['PATH'] = self.options.srcdir + ":" + \ self.options.srcdir + "/qt:" + os.environ['PATH'] check_json_precision() self.options.cachedir = os.path.abspath(self.options.cachedir) # Set up temp directory and start logging if self.options.tmpdir: self.options.tmpdir = os.path.abspath(self.options.tmpdir) os.makedirs(self.options.tmpdir, exist_ok=False) else: self.options.tmpdir = tempfile.mkdtemp(prefix="test") self._start_logging() success = TestStatus.FAILED try: if self.options.usecli and not self.supports_cli: raise SkipTest( "--usecli specified but test does not support using CLI") self.setup_chain() self.setup_network() self.run_test() success = TestStatus.PASSED except JSONRPCException as e: self.log.exception("JSONRPC error") except SkipTest as e: self.log.warning("Test Skipped: {}".format(e.message)) success = TestStatus.SKIPPED except AssertionError as e: self.log.exception("Assertion failed") except KeyError as e: self.log.exception("Key error") except Exception as e: self.log.exception("Unexpected exception caught during testing") except KeyboardInterrupt as e: self.log.warning("Exiting after keyboard interrupt") if success == TestStatus.FAILED and self.options.pdbonfailure: print("Testcase failed. Attaching python debugger. Enter ? 
for help") pdb.set_trace() if not self.options.noshutdown: self.log.info("Stopping nodes") if self.nodes: self.stop_nodes() else: self.log.info( "Note: bitcoinds were not stopped and may still be running") if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED: self.log.info("Cleaning up") shutil.rmtree(self.options.tmpdir) else: self.log.warning( "Not cleaning up dir {}".format(self.options.tmpdir)) if os.getenv("PYTHON_DEBUG", ""): # Dump the end of the debug logs, to aid in debugging rare # travis failures. import glob filenames = [self.options.tmpdir + "/test_framework.log"] filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log") MAX_LINES_TO_PRINT = 1000 for fn in filenames: try: with open(fn, 'r') as f: print("From", fn, ":") print("".join(deque(f, MAX_LINES_TO_PRINT))) except OSError: print("Opening file {} failed.".format(fn)) traceback.print_exc() if success == TestStatus.PASSED: self.log.info("Tests successful") sys.exit(TEST_EXIT_PASSED) elif success == TestStatus.SKIPPED: self.log.info("Test skipped") sys.exit(TEST_EXIT_SKIPPED) else: self.log.error( "Test failed. Test logging available at {}/test_framework.log".format(self.options.tmpdir)) logging.shutdown() sys.exit(TEST_EXIT_FAILED) # Methods to override in subclass test scripts. def set_test_params(self): """Tests must this method to change default values for number of nodes, topology, etc""" raise NotImplementedError def add_options(self, parser): """Override this method to add command-line options to the test""" pass def setup_chain(self): """Override this method to customize blockchain setup""" self.log.info("Initializing test directory " + self.options.tmpdir) if self.setup_clean_chain: self._initialize_chain_clean() else: self._initialize_chain() def setup_network(self): """Override this method to customize test network topology""" self.setup_nodes() # Connect the nodes as a "chain". This allows us # to split the network between nodes 1 and 2 to get # two halves that can work on competing chains. for i in range(self.num_nodes - 1): connect_nodes_bi(self.nodes[i], self.nodes[i + 1]) self.sync_all() def setup_nodes(self): """Override this method to customize test node setup""" extra_args = None if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes() def run_test(self): """Tests must override this method to define test logic""" raise NotImplementedError # Public helper methods. These can be accessed by the subclass test scripts. 
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None): """Instantiate TestNode objects""" if extra_args is None: extra_args = [[]] * num_nodes if binary is None: binary = [None] * num_nodes assert_equal(len(extra_args), num_nodes) assert_equal(len(binary), num_nodes) for i in range(num_nodes): self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, rpc_port=rpc_port(i), p2p_port=p2p_port(i), timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli)) def start_node(self, i, *args, **kwargs): """Start a bitcoind""" node = self.nodes[i] node.start(*args, **kwargs) node.wait_for_rpc_connection() if self.options.coveragedir is not None: coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) def start_nodes(self, extra_args=None, *args, **kwargs): """Start multiple bitcoinds""" if extra_args is None: extra_args = [None] * self.num_nodes assert_equal(len(extra_args), self.num_nodes) try: for i, node in enumerate(self.nodes): node.start(extra_args[i], *args, **kwargs) for node in self.nodes: node.wait_for_rpc_connection() except: # If one node failed to start, stop the others self.stop_nodes() raise if self.options.coveragedir is not None: for node in self.nodes: coverage.write_all_rpc_commands( self.options.coveragedir, node.rpc) def stop_node(self, i): """Stop a bitcoind test node""" self.nodes[i].stop_node() self.nodes[i].wait_until_stopped() def stop_nodes(self): """Stop multiple bitcoind test nodes""" for node in self.nodes: # Issue RPC to stop nodes node.stop_node() for node in self.nodes: # Wait for nodes to stop node.wait_until_stopped() def restart_node(self, i, extra_args=None): """Stop and start a test node""" self.stop_node(i) self.start_node(i, extra_args) def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs): with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr: try: self.start_node( i, extra_args, stderr=log_stderr, *args, **kwargs) self.stop_node(i) except Exception as e: assert 'bitcoind exited' in str(e) # node must have shut down self.nodes[i].running = False self.nodes[i].process = None if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8') if expected_msg not in stderr: raise AssertionError( "Expected error \"" + expected_msg + "\" not found in:\n" + stderr) else: if expected_msg is None: assert_msg = "bitcoind should have exited with an error" else: assert_msg = "bitcoind should have exited with expected error " + expected_msg raise AssertionError(assert_msg) def wait_for_node_exit(self, i, timeout): self.nodes[i].process.wait(timeout) def split_network(self): """ Split the network of four nodes into nodes 0/1 and 2/3. """ disconnect_nodes(self.nodes[1], self.nodes[2]) disconnect_nodes(self.nodes[2], self.nodes[1]) self.sync_all([self.nodes[:2], self.nodes[2:]]) def join_network(self): """ Join the (previously split) network halves together. """ connect_nodes_bi(self.nodes[1], self.nodes[2]) self.sync_all() def sync_all(self, node_groups=None): if not node_groups: node_groups = [self.nodes] for group in node_groups: sync_blocks(group) sync_mempools(group) def enable_mocktime(self): """Enable mocktime for the script. mocktime may be needed for scripts that use the cached version of the blockchain. If the cached version of the blockchain is used without mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous versions of the cache, this helper function sets mocktime to Jan 1, 2014 + (201 * 10 * 60)""" self.mocktime = 1388534400 + (201 * 10 * 60) def disable_mocktime(self): self.mocktime = 0 # Private helper methods. These should not be accessed by the subclass test scripts. def _start_logging(self): # Add logger and logging handlers self.log = logging.getLogger('TestFramework') self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log') fh.setLevel(logging.DEBUG) # Create console handler to log messages to stdout. By default this # logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # User can provide log level as a number or string (eg DEBUG). loglevel # was caught as a string, so try to convert it to an int ll = int(self.options.loglevel) if self.options.loglevel.isdigit( ) else self.options.loglevel.upper() ch.setLevel(ll) # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) formatter = logging.Formatter( fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S') formatter.converter = time.gmtime fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger self.log.addHandler(fh) self.log.addHandler(ch) if self.options.trace_rpc: rpc_logger = logging.getLogger("BitcoinRPC") rpc_logger.setLevel(logging.DEBUG) rpc_handler = logging.StreamHandler(sys.stdout) rpc_handler.setLevel(logging.DEBUG) rpc_logger.addHandler(rpc_handler) def _initialize_chain(self): """Initialize a pre-mined blockchain for use by the test. Create a cache of a 200-block-long chain (with wallet) for MAX_NODES. Afterward, create num_nodes copies from the cache.""" assert self.num_nodes <= MAX_NODES create_cache = False for i in range(MAX_NODES): if not os.path.isdir(os.path.join(self.options.cachedir, 'node' + str(i))): create_cache = True break if create_cache: self.log.debug("Creating data directories from cached datadir") # find and delete old cache directories if any exist for i in range(MAX_NODES): if os.path.isdir(os.path.join(self.options.cachedir, "node" + str(i))): shutil.rmtree(os.path.join( self.options.cachedir, "node" + str(i))) # Create cache directories, run bitcoinds: for i in range(MAX_NODES): datadir = initialize_datadir(self.options.cachedir, i) self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], host=None, rpc_port=rpc_port(i), p2p_port=p2p_port(i), timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None)) self.nodes[i].clear_default_args() self.nodes[i].extend_default_args([ "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]) if i > 0: self.nodes[i].extend_default_args( ["-connect=127.0.0.1:" + str(p2p_port(0))]) self.start_node(i) # Wait for RPC connections to be ready for node in self.nodes: node.wait_for_rpc_connection() # Create a 200-block-long chain; each of the 4 first nodes # gets 25 mature blocks and 25 immature. # Note: To preserve compatibility with older versions of # initialize_chain, only 4 nodes will generate coins.
# # blocks are created with timestamps 10 minutes apart # starting from 2010 minutes in the past self.enable_mocktime() block_time = self.mocktime - (201 * 10 * 60) for i in range(2): for peer in range(4): for j in range(25): set_node_times(self.nodes, block_time) self.nodes[peer].generate(1) block_time += 10 * 60 # Must sync before next peer starts generating blocks sync_blocks(self.nodes) # Shut them down, and clean up cache directories: self.stop_nodes() self.nodes = [] self.disable_mocktime() for i in range(MAX_NODES): os.remove(log_filename(self.options.cachedir, i, "debug.log")) os.remove(log_filename( self.options.cachedir, i, "wallets/db.log")) os.remove(log_filename(self.options.cachedir, i, "peers.dat")) for i in range(self.num_nodes): from_dir = os.path.join(self.options.cachedir, "node" + str(i)) to_dir = os.path.join(self.options.tmpdir, "node" + str(i)) shutil.copytree(from_dir, to_dir) # Overwrite port/rpcport in bitcoin.conf initialize_datadir(self.options.tmpdir, i) def _initialize_chain_clean(self): """Initialize empty blockchain for use by the test. Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization.""" for i in range(self.num_nodes): initialize_datadir(self.options.tmpdir, i) class ComparisonTestFramework(BitcoinTestFramework): """Test framework for doing p2p comparison testing Sets up some bitcoind binaries: - 1 binary: test binary - 2 binaries: 1 test binary, 1 ref binary - n>2 binaries: 1 test binary, n-1 ref binaries""" def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def add_options(self, parser): parser.add_argument("--testbinary", dest="testbinary", default=os.getenv("BITCOIND", "bitcoind"), help="bitcoind binary to test") parser.add_argument("--refbinary", dest="refbinary", default=os.getenv("BITCOIND", "bitcoind"), help="bitcoind binary to use for reference nodes (if any)") def setup_network(self): extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes if hasattr(self, "extra_args"): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args, binary=[self.options.testbinary] + [self.options.refbinary] * (self.num_nodes - 1)) self.start_nodes() class SkipTest(Exception): """This exception is raised to skip a test""" def __init__(self, message): self.message = message diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 4e86ac1b6..37ef9c3b4 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -1,325 +1,325 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" import decimal import errno import http.client import json import logging import os import re import subprocess import time from .authproxy import JSONRPCException -from .mininode import COIN, FromHex, CTransaction +from .messages import COIN, CTransaction, FromHex from .util import ( assert_equal, get_rpc_proxy, + p2p_port, rpc_url, wait_until, - p2p_port, ) # For Python 3.4 compatibility JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) BITCOIND_PROC_WAIT_TIMEOUT = 60 class TestNode(): """A class for representing a bitcoind node under test. 
This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, dirname, extra_args, host, rpc_port, p2p_port, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False): self.index = i self.datadir = os.path.join(dirname, "node" + str(i)) self.host = host self.rpc_port = rpc_port self.p2p_port = p2p_port self.name = "testnode-{}".format(i) if timewait: self.rpc_timeout = timewait else: # Wait for up to 60 seconds for the RPC server to respond self.rpc_timeout = 60 if binary is None: self.binary = os.getenv("BITCOIND", "bitcoind") else: self.binary = binary self.stderr = stderr self.coverage_dir = coverage_dir # Most callers will just need to add extra args to the default list # below. # For those callers that need more flexibility, they can access the # default args using the provided facilities self.extra_args = extra_args self.default_args = ["-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=" + self.name] self.cli = TestNodeCLI( os.getenv("BITCOINCLI", "bitcoin-cli"), self.datadir) self.use_cli = use_cli self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.relay_fee_cache = None self.log = logging.getLogger('TestFramework.node{}'.format(i)) self.p2ps = [] def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: return getattr(self.cli, name) else: assert self.rpc is not None, "Error: RPC not initialized" assert self.rpc_connected, "Error: No RPC connection" return getattr(self.rpc, name) def clear_default_args(self): self.default_args.clear() def extend_default_args(self, args): self.default_args.extend(args) def remove_default_args(self, args): for rm_arg in args: # Remove all occurrences of rm_arg in self.default_args: # - if the arg is a flag (-flag), then the names must match # - if the arg is a value (-key=value) then the name must start # with "-key=" (the '=' char is to avoid removing the "-key_suffix" # arg when "-key" is the argument to remove). self.default_args = [def_arg for def_arg in self.default_args if rm_arg != def_arg and not def_arg.startswith(rm_arg + '=')] def start(self, extra_args=None, stderr=None, *args, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args if stderr is None: stderr = self.stderr self.process = subprocess.Popen( [self.binary] + self.default_args + extra_args, stderr=stderr, *args, **kwargs) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up") def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoind process.
Raises an AssertionError if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): assert self.process.poll( ) is None, "bitcoind exited with status {} during initialization".format(self.process.returncode) try: self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.host, self.rpc_port), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir) self.rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up self.rpc_connected = True self.url = self.rpc.url self.log.debug("RPC successfully started") return except IOError as e: if e.errno != errno.ECONNREFUSED: # Port not yet open? raise # unknown IO error except JSONRPCException as e: # Initialization phase if e.error['code'] != -28: # RPC in warmup? raise # unknown JSON RPC exception except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) raise AssertionError("Unable to connect to bitcoind") def get_wallet_rpc(self, wallet_name): if self.use_cli: return self.cli("-rpcwallet={}".format(wallet_name)) else: assert self.rpc_connected assert self.rpc wallet_path = "wallet/{}".format(wallet_name) return self.rpc / wallet_path def stop_node(self): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop() except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") del self.p2ps[:] def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert_equal(return_code, 0) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until(self.is_node_stopped, timeout=timeout) def node_encrypt_wallet(self, passphrase): """Encrypts the wallet. This causes bitcoind to shut down, so this method takes care of cleaning up resources.""" self.encryptwallet(passphrase) self.wait_until_stopped() def relay_fee(self, cached=True): if not self.relay_fee_cache or not cached: self.relay_fee_cache = self.getnetworkinfo()["relayfee"] return self.relay_fee_cache def calculate_fee(self, tx): # relay_fee() is denominated in coins per kB; divide by 1000 for a # per-byte rate, then multiply by the size and by COIN to get satoshis. billable_size_estimate = tx.billable_size() # Add some padding for signatures # NOTE: Fees must be calculated before signatures are added, # so they will never be included in the billable_size above. billable_size_estimate += len(tx.vin) * 81 return int(self.relay_fee() / 1000 * billable_size_estimate * COIN) def calculate_fee_from_txid(self, txid): ctx = FromHex(CTransaction(), self.getrawtransaction(txid)) return self.calculate_fee(ctx) def add_p2p_connection(self, p2p_conn, *args, **kwargs): """Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' p2p_conn.peer_connect(*args, **kwargs) self.p2ps.append(p2p_conn) return p2p_conn @property def p2p(self): """Return the first p2p connection Convenience property - most tests only use a single p2p connection to each node, so this saves having to write node.p2ps[0] many times.""" assert self.p2ps, "No p2p connection" return self.p2ps[0] def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) class TestNodeCLI(): """Interface to bitcoin-cli for an individual node""" def __init__(self, binary, datadir): self.args = [] self.binary = binary self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.bitcoincli') def __call__(self, *args, input=None): # TestNodeCLI is callable with bitcoin-cli command-line args cli = TestNodeCLI(self.binary, self.datadir) cli.args = [str(arg) for arg in args] cli.input = input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for request in requests: try: results.append(dict(result=request())) except JSONRPCException as e: results.append(dict(error=e)) return results def send_cli(self, command, *args, **kwargs): """Run bitcoin-cli command. Deserializes returned string as python object.""" pos_args = [str(arg) for arg in args] named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()] assert not ( pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.args if named_args: p_args += ["-named"] p_args += [command] + pos_args + named_args self.log.debug("Running bitcoin-cli command: {}".format(command)) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match( r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError( returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except JSONDecodeError: return cli_stdout.rstrip("\n") diff --git a/test/functional/test_framework/txtools.py b/test/functional/test_framework/txtools.py index a8abda430..a16346886 100644 --- a/test/functional/test_framework/txtools.py +++ b/test/functional/test_framework/txtools.py @@ -1,70 +1,70 @@ -from .cdefs import MIN_TX_SIZE, MAX_TXOUT_PUBKEY_SCRIPT -from .mininode import CTransaction, FromHex, ToHex, CTxOut -from .script import OP_RETURN, CScript - import random +from .cdefs import MAX_TXOUT_PUBKEY_SCRIPT, MIN_TX_SIZE +from .messages import CTransaction, CTxOut, FromHex, ToHex +from .script import CScript, OP_RETURN + def pad_tx(tx, pad_to_size=MIN_TX_SIZE): """ Pad a 
transaction with OP_RETURN junk data until it is at least pad_to_size, or leave it alone if it's already bigger than that. """ curr_size = len(tx.serialize()) if curr_size >= pad_to_size: # Bail early, the txn is already big enough return # This code attempts to pad a transaction with OP_RETURN vouts such that # it will be exactly pad_to_size. In order to do this we have to create # vouts of size x (maximum OP_RETURN size - vout overhead), plus a final # one that subsumes any runoff which would be less than vout overhead. # # There are two cases where this is not possible: # 1. The transaction size is between pad_to_size and pad_to_size - extra_bytes # 2. The transaction is already greater than pad_to_size # # Visually: # | .. x .. | .. x .. | .. x .. | .. x + desired_size % x | # VOUT_1 VOUT_2 VOUT_3 VOUT_4 # txout.value + txout.pk_script bytes + op_return extra_bytes = 8 + 1 + 1 required_padding = pad_to_size - curr_size while required_padding > 0: # We need at least extra_bytes left over each time, or we can't # subsume the final (and possibly undersized) iteration of the loop padding_len = min(required_padding, MAX_TXOUT_PUBKEY_SCRIPT - extra_bytes) assert padding_len >= 0, "Can't pad less than 0 bytes, trying {}".format( padding_len) # We will end up with less than 1 UTXO of bytes after this, add # them to this txn next_iteration_padding = required_padding - padding_len - extra_bytes if next_iteration_padding > 0 and next_iteration_padding < extra_bytes: padding_len += next_iteration_padding # If we're at or below extra_bytes, a bare OP_RETURN output (with no # data payload) is all the padding we need if padding_len <= extra_bytes: tx.vout.append(CTxOut(0, CScript([OP_RETURN]))) else: # Subtract the overhead for the TxOut padding_len -= extra_bytes padding = random.randrange( 1 << 8 * padding_len - 2, 1 << 8 * padding_len - 1) tx.vout.append( CTxOut(0, CScript([OP_RETURN, padding]))) curr_size = len(tx.serialize()) required_padding = pad_to_size - curr_size assert curr_size >= pad_to_size, "{} !>= {}".format(curr_size, pad_to_size) tx.rehash() def pad_raw_tx(rawtx_hex, min_size=MIN_TX_SIZE): """ Pad a raw transaction with OP_RETURN data until it reaches at least min_size """ tx = CTransaction() FromHex(tx, rawtx_hex) pad_tx(tx, min_size) return ToHex(tx)
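As a quick illustration of how these padding helpers are meant to be used, here is a minimal sketch (not part of this diff; it assumes the test_framework package is importable, as it is from within a functional test):

from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut
from test_framework.txtools import pad_tx

# Build a skeletal transaction with one dummy input and one empty output...
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(0, 0), b''))
tx.vout.append(CTxOut(0, b''))

# ...then pad it to at least 200 bytes. pad_tx() appends OP_RETURN
# outputs and rehashes the transaction, so the size check below holds.
pad_tx(tx, pad_to_size=200)
assert len(tx.serialize()) >= 200

pad_raw_tx() is the hex-string convenience wrapper around the same logic, for callers that hold a raw transaction from RPC rather than a CTransaction object.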