diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py index 3bf732b47..9809394fe 100755 --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -1,498 +1,495 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# -# Test BIP68 implementation -# +"""Test BIP68 implementation.""" import time from test_framework.blocktools import ( create_block, create_coinbase, ) from test_framework.messages import ( COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex, ) from test_framework.script import CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, satoshi_round, sync_blocks, ) SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31) # this means use time (0 means height) SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22) # this is a bit-shift SEQUENCE_LOCKTIME_GRANULARITY = 9 SEQUENCE_LOCKTIME_MASK = 0x0000ffff # RPC error for non-BIP68 final transactions NOT_FINAL_ERROR = "64: non-BIP68-final" class BIP68Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [["-blockprioritypercentage=0", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-blockprioritypercentage=0", "-acceptnonstdtxn=0", "-maxreorgdepth=-1"]] def run_test(self): self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] # Generate some coins self.nodes[0].generate(110) self.log.info("Running test disable flag") self.test_disable_flag() self.log.info("Running test sequence-lock-confirmed-inputs") self.test_sequence_lock_confirmed_inputs() self.log.info("Running test sequence-lock-unconfirmed-inputs") self.test_sequence_lock_unconfirmed_inputs() self.log.info( "Running test BIP68 not consensus before versionbits activation") self.test_bip68_not_consensus() self.log.info("Verifying nVersion=2 transactions aren't standard") self.test_version2_relay(before_activation=True) self.log.info("Activating BIP68 (and 112/113)") self.activateCSV() self.log.info("Verifying nVersion=2 transactions are now standard") self.test_version2_relay(before_activation=False) self.log.info("Passed") # Test that BIP68 is not in effect if tx version is 1, or if # the first sequence bit is set. def test_disable_flag(self): # Create some unconfirmed inputs new_addr = self.nodes[0].getnewaddress() # send 2 BCH self.nodes[0].sendtoaddress(new_addr, 2) utxos = self.nodes[0].listunspent(0, 0) assert(len(utxos) > 0) utxo = utxos[0] tx1 = CTransaction() value = int(satoshi_round(utxo["amount"] - self.relayfee) * COIN) # Check that the disable flag disables relative locktime. # If sequence locks were used, this would require 1 block for the # input to mature. 
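# --- Editor's sketch (illustrative, not part of the original test): how
# BIP68 packs a relative lock into nSequence, using the constants defined
# at the top of this file. Bit 31 disables the lock entirely, bit 22
# selects a time-based lock (512-second units) instead of a height-based
# one, and the low 16 bits carry the lock value.
def decode_bip68_sequence(sequence_value):
    if sequence_value & SEQUENCE_LOCKTIME_DISABLE_FLAG:
        # relative lock-time is disabled for this input
        return None
    value = sequence_value & SEQUENCE_LOCKTIME_MASK
    if sequence_value & SEQUENCE_LOCKTIME_TYPE_FLAG:
        # time-based lock, in units of
        # 2**SEQUENCE_LOCKTIME_GRANULARITY (512) seconds
        return ("time", value << SEQUENCE_LOCKTIME_GRANULARITY)
    return ("height", value)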
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 tx1.vin = [ CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] tx1.vout = [CTxOut(value, CScript([b'a']))] pad_tx(tx1) tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))[ "hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) # This transaction will enable sequence-locks, so this transaction should # fail tx2 = CTransaction() tx2.nVersion = 2 sequence_value = sequence_value & 0x7fffffff tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] tx2.vout = [CTxOut(int(value - self.relayfee * COIN), CScript([b'a']))] pad_tx(tx2) tx2.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. tx2.nVersion = 1 self.nodes[0].sendrawtransaction(ToHex(tx2)) # Calculate the median time past of a prior block ("confirmations" before # the current tip). def get_median_time_past(self, confirmations): block_hash = self.nodes[0].getblockhash( self.nodes[0].getblockcount() - confirmations) return self.nodes[0].getblockheader(block_hash)["mediantime"] # Test that sequence locks are respected for transactions spending # confirmed inputs. def test_sequence_lock_confirmed_inputs(self): # Create lots of confirmed utxos, and use them to generate lots of random # transactions. max_outputs = 50 addresses = [] while len(addresses) < max_outputs: addresses.append(self.nodes[0].getnewaddress()) while len(self.nodes[0].listunspent()) < 200: import random random.shuffle(addresses) num_outputs = random.randint(1, max_outputs) outputs = {} for i in range(num_outputs): outputs[addresses[i]] = random.randint(1, 20) * 0.01 self.nodes[0].sendmany("", outputs) self.nodes[0].generate(1) utxos = self.nodes[0].listunspent() # Try creating a lot of random transactions. # Each time, choose a random number of inputs, and randomly set # some of those inputs to be sequence locked (and randomly choose # between height/time locking). Small random chance of making the locks # all pass. for i in range(400): # Randomly choose up to 10 inputs num_inputs = random.randint(1, 10) random.shuffle(utxos) # Track whether any sequence locks used should fail should_pass = True # Track whether this transaction was built with sequence locks using_sequence_locks = False tx = CTransaction() tx.nVersion = 2 value = 0 for j in range(num_inputs): # this disables sequence locks sequence_value = 0xfffffffe # 50% chance we enable sequence locks if random.randint(0, 1): using_sequence_locks = True # 10% of the time, make the input sequence value pass input_will_pass = (random.randint(1, 10) == 1) sequence_value = utxos[j]["confirmations"] if not input_will_pass: sequence_value += 1 should_pass = False # Figure out what the median-time-past was for the confirmed input # Note that if an input has N confirmations, we're going back N blocks # from the tip so that we're looking up MTP of the block # PRIOR to the one the input appears in, as per the BIP68 # spec. 
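# --- Editor's sketch (illustrative, not part of the original test): for
# a time-based lock, BIP68 compares median-time-past (MTP) values rather
# than raw block timestamps. Mirroring the arithmetic used below, an
# input with lock value v is spendable once
# MTP(tip) - MTP(block before the input's block) >= v << 9.
def time_lock_satisfied(sequence_value, orig_time, cur_time):
    lock_seconds = ((sequence_value & SEQUENCE_LOCKTIME_MASK)
                    << SEQUENCE_LOCKTIME_GRANULARITY)
    return cur_time - orig_time >= lock_seconds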
orig_time = self.get_median_time_past( utxos[j]["confirmations"]) # MTP of the tip cur_time = self.get_median_time_past(0) # can only timelock this input if it's not too old -- # otherwise use height can_time_lock = True if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: can_time_lock = False # if time-lockable, then 50% chance we make this a time # lock if random.randint(0, 1) and can_time_lock: # Find first time-lock value that fails, or latest one # that succeeds time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY if input_will_pass and time_delta > cur_time - orig_time: sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) elif (not input_will_pass and time_delta <= cur_time - orig_time): sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + 1 sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx.vin.append( CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) value += utxos[j]["amount"] * COIN # Overestimate the size of the tx - signatures should be less than # 120 bytes, and leave 50 for the output tx_size = len(ToHex(tx)) // 2 + 120 * num_inputs + 50 tx.vout.append( CTxOut(int(value - self.relayfee * tx_size * COIN / 1000), CScript([b'a']))) rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))[ "hex"] if (using_sequence_locks and not should_pass): # This transaction should be rejected assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) else: # This raw transaction should be accepted self.nodes[0].sendrawtransaction(rawtx) utxos = self.nodes[0].listunspent() # Test that sequence locks on unconfirmed inputs must have nSequence # height or time of 0 to be accepted. # Then test that BIP68-invalid transactions are removed from the mempool # after a reorg. def test_sequence_lock_unconfirmed_inputs(self): # Store height so we can easily reset the chain at the end of the test cur_height = self.nodes[0].getblockcount() # Create a mempool tx. txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # As the fees are calculated prior to the transaction being signed, # there is some uncertainty that calculate fee provides the correct # minimal fee. Since regtest coins are free, let's go ahead and # increase the fee by an order of magnitude to ensure this test # passes. fee_multiplier = 10 # Anyone-can-spend mempool tx. # Sequence lock of 0 should pass. tx2 = CTransaction() tx2.nVersion = 2 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(0), CScript([b'a']))] tx2.vout[0].nValue = tx1.vout[0].nValue - \ fee_multiplier * self.nodes[0].calculate_fee(tx2) tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(tx2_raw) # Create a spend of the 0th output of orig_tx with a sequence lock # of 1, and test what happens when submitting. 
# orig_tx.vout[0] must be an anyone-can-spend output def test_nonzero_locks(orig_tx, node, use_height_lock): sequence_value = 1 if not use_height_lock: sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx = CTransaction() tx.nVersion = 2 tx.vin = [ CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] tx.vout = [ CTxOut(int(orig_tx.vout[0].nValue - fee_multiplier * node.calculate_fee(tx)), CScript([b'a']))] pad_tx(tx) tx.rehash() if (orig_tx.hash in node.getrawmempool()): # sendrawtransaction should fail if the tx is in the mempool assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) else: # sendrawtransaction should succeed if the tx is not in the mempool node.sendrawtransaction(ToHex(tx)) return tx test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Now mine some blocks, but make sure tx2 doesn't get mined. # Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction( tx2.hash, -1e15, -fee_multiplier * self.nodes[0].calculate_fee(tx2)) cur_time = int(time.time()) for i in range(10): self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) cur_time += 600 assert(tx2.hash in self.nodes[0].getrawmempool()) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Mine tx2, and then try again self.nodes[0].prioritisetransaction( tx2.hash, 1e15, fee_multiplier * self.nodes[0].calculate_fee(tx2)) # Advance the time on the node so that we can test timelocks self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) assert(tx2.hash not in self.nodes[0].getrawmempool()) # Now that tx2 is not in the mempool, a sequence locked spend should # succeed tx3 = test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) assert(tx3.hash in self.nodes[0].getrawmempool()) self.nodes[0].generate(1) assert(tx3.hash not in self.nodes[0].getrawmempool()) # One more test, this time using height locks tx4 = test_nonzero_locks( tx3, self.nodes[0], use_height_lock=True) assert(tx4.hash in self.nodes[0].getrawmempool()) # Now try combining confirmed and unconfirmed inputs tx5 = test_nonzero_locks( tx4, self.nodes[0], use_height_lock=True) assert(tx5.hash not in self.nodes[0].getrawmempool()) utxos = self.nodes[0].listunspent() tx5.vin.append( CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) tx5.vout[0].nValue += int(utxos[0]["amount"] * COIN) raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"] assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) # Test mempool-BIP68 consistency after reorg # # State of the transactions in the last blocks: # ... -> [ tx2 ] -> [ tx3 ] # tip-1 tip # And currently tx4 is in the mempool. # # If we invalidate the tip, tx3 should get added to the mempool, causing # tx4 to be removed (fails sequence-lock). self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) assert(tx4.hash not in self.nodes[0].getrawmempool()) assert(tx3.hash in self.nodes[0].getrawmempool()) # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in # diagram above). # This would cause tx2 to be added back to the mempool, which in turn causes # tx3 to be removed. 
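# (Editor's note: the reorg below relies on two mempool behaviors that the
# asserts then verify: transactions from disconnected blocks are returned
# to the mempool, and mempool entries whose BIP68 locks are no longer
# satisfied by the new chain tip are evicted.)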
tip = int(self.nodes[0].getblockhash( self.nodes[0].getblockcount() - 1), 16) height = self.nodes[0].getblockcount() for i in range(2): block = create_block(tip, create_coinbase(height), cur_time) block.nVersion = 3 block.rehash() block.solve() tip = block.sha256 height += 1 self.nodes[0].submitblock(ToHex(block)) cur_time += 1 mempool = self.nodes[0].getrawmempool() assert(tx3.hash not in mempool) assert(tx2.hash in mempool) # Reset the chain and get rid of the mocktimed-blocks self.nodes[0].setmocktime(0) self.nodes[0].invalidateblock( self.nodes[0].getblockhash(cur_height + 1)) self.nodes[0].generate(10) def get_csv_status(self): height = self.nodes[0].getblockchaininfo()['blocks'] return height >= 576 # Make sure that BIP68 isn't being used to validate blocks, prior to # versionbits activation. If more blocks are mined prior to this test # being run, then it's possible the test has activated the soft fork, and # this test should be moved to run earlier, or deleted. def test_bip68_not_consensus(self): assert_equal(self.get_csv_status(), False) txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Make an anyone-can-spend transaction tx2 = CTransaction() tx2.nVersion = 1 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] # sign tx2 tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) pad_tx(tx2) tx2.rehash() self.nodes[0].sendrawtransaction(ToHex(tx2)) # Now make an invalid spend of tx2 according to BIP68 # 100 block relative locktime sequence_value = 100 tx3 = CTransaction() tx3.nVersion = 2 tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] tx3.vout = [ CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] pad_tx(tx3) tx3.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) # make a block that violates bip68; ensure that the tip updates tip = int(self.nodes[0].getbestblockhash(), 16) block = create_block( tip, create_coinbase(self.nodes[0].getblockcount() + 1)) block.nVersion = 3 block.vtx.extend( sorted([tx1, tx2, tx3], key=lambda tx: tx.get_id())) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].submitblock(ToHex(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) def activateCSV(self): # activation should happen at block height 576 csv_activation_height = 576 height = self.nodes[0].getblockcount() assert_greater_than(csv_activation_height - height, 1) self.nodes[0].generate(csv_activation_height - height - 1) assert_equal(self.get_csv_status(), False) disconnect_nodes(self.nodes[0], self.nodes[1]) self.nodes[0].generate(1) assert_equal(self.get_csv_status(), True) # We have a block that has CSV activated, but we want to be at # the activation point, so we invalidate the tip. 
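# (Editor's note: in this codebase CSV activation is keyed to block
# height rather than versionbits signalling, as get_csv_status above
# checks height >= 576; the "versionbits" wording in the surrounding
# comments appears to be historical.)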
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) connect_nodes(self.nodes[0], self.nodes[1]) sync_blocks(self.nodes) # Use self.nodes[1] to test standardness relay policy def test_version2_relay(self, before_activation): inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.0} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] tx = FromHex(CTransaction(), rawtxfund) tx.nVersion = 2 tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))[ "hex"] try: self.nodes[1].sendrawtransaction(tx_signed) assert(before_activation == False) except: assert(before_activation) if __name__ == '__main__': BIP68Test().main() diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 7325d8806..b94b0a1f6 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -1,1355 +1,1354 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test block processing. + +This reimplements tests from the bitcoinj/FullBlockTestGenerator used +by the pull-tester. + +We use the testing framework in which we expect a particular answer from +each test. +""" import copy import struct import time from test_framework.blocktools import ( create_block, create_coinbase, create_transaction, get_legacy_sigopcount_block, make_conform_to_ctor, ) from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS_PER_MB from test_framework.comptool import RejectResult, TestInstance, TestManager from test_framework.key import CECKey from test_framework.messages import ( CBlock, CBlockHeader, COIN, COutPoint, CTransaction, CTxIn, CTxOut, ser_uint256, uint256_from_compact, uint256_from_str, ) from test_framework.mininode import network_thread_start from test_framework.script import ( CScript, hash160, MAX_SCRIPT_ELEMENT_SIZE, OP_2DUP, OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY, OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_ELSE, OP_ENDIF, OP_EQUAL, OP_FALSE, OP_HASH160, OP_IF, OP_INVALIDOPCODE, OP_RETURN, OP_TRUE, SIGHASH_ALL, SIGHASH_FORKID, SignatureHashForkId, ) from test_framework.test_framework import ComparisonTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal class PreviousSpendableOutput(): def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending - -''' -This reimplements tests from the bitcoinj/FullBlockTestGenerator used -by the pull-tester. - -We use the testing framework in which we expect a particular answer from -each test. -''' - # Use this class for tests that require behavior other than normal "mininode" behavior. # For now, it is used to serialize a bloated varint (b64). class CBrokenBlock(CBlock): def __init__(self, header=None): super(CBrokenBlock, self).__init__(header) def initialize(self, base_block): self.vtx = copy.deepcopy(base_block.vtx) self.hashMerkleRoot = self.calc_merkle_root() def serialize(self): r = b"" r += super(CBlock, self).serialize() r += struct.pack(" b1 (0) -> b2 (1) block(1, spend=out[0]) save_spendable_output() yield accepted() block(2, spend=out[1]) yield accepted() save_spendable_output() # so fork like this: # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) # # Nothing should happen at this point. We saw b2 first so it takes # priority. 
tip(1) b3 = block(3, spend=out[1]) txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0) yield rejected() # Now we add another block to make the alternative chain longer. # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) -> b4 (2) block(4, spend=out[2]) yield accepted() # ... and back to the first chain. # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b3 (1) -> b4 (2) tip(2) block(5, spend=out[2]) save_spendable_output() yield rejected() block(6, spend=out[3]) yield accepted() # Try to create a fork that double-spends # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) tip(5) block(7, spend=out[2]) yield rejected() block(8, spend=out[4]) yield rejected() # Try to create a block that has too much fee # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) tip(6) block(9, spend=out[4], additional_coinbase_value=1) yield rejected(RejectResult(16, b'bad-cb-amount')) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) tip(5) block(10, spend=out[3]) yield rejected() block(11, spend=out[4], additional_coinbase_value=1) yield rejected(RejectResult(16, b'bad-cb-amount')) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) # (b12 added last) # \-> b3 (1) -> b4 (2) tip(5) b12 = block(12, spend=out[3]) save_spendable_output() b13 = block(13, spend=out[4]) # Deliver the block header for b12, and the block b13. # b13 should be accepted but the tip won't advance until b12 is # delivered. yield TestInstance([[CBlockHeader(b12), None], [b13, False]]) save_spendable_output() # b14 is invalid, but the node won't know that until it tries to connect # Tip still can't advance because b12 is missing block(14, spend=out[5], additional_coinbase_value=1) yield rejected() yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13. 
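# (Editor's note on the comptool convention used above: each TestInstance
# entry has the form [object, expected_outcome, optional_expected_tip], so
# [[b12, True, b13.sha256]] delivers b12, expects it to be accepted, and
# expects the tip to advance to the previously delivered b13.)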
# Add a block with MAX_BLOCK_SIGOPS_PER_MB and one with one more sigop # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) # \-> b3 (1) -> b4 (2) # Test that a block with a lot of checksigs is okay lots_of_checksigs = CScript( [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) tip(13) block(15, spend=out[5], script=lots_of_checksigs) yield accepted() save_spendable_output() # Test that a block with too many checksigs is rejected too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB)) block(16, spend=out[6], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Attempt to spend a transaction created on a different fork # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) # \-> b3 (1) -> b4 (2) tip(15) block(17, spend=txout_b3) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) tip(13) block(18, spend=txout_b3) yield rejected() block(19, spend=out[6]) yield rejected() # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) # \-> b3 (1) -> b4 (2) tip(15) block(20, spend=out[7]) yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase')) # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) tip(13) block(21, spend=out[6]) yield rejected() block(22, spend=out[5]) yield rejected() # Create a block on either side of LEGACY_MAX_BLOCK_SIZE and make sure it's accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) tip(15) b23 = block(23, spend=out[6]) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b23.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) b23 = update_block(23, [tx]) # Make sure the math above worked out to produce a max-sized block assert_equal(len(b23.serialize()), LEGACY_MAX_BLOCK_SIZE) yield accepted() save_spendable_output() # Create blocks with a coinbase input script size out of range # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... (7) # \-> b3 (1) -> b4 (2) tip(15) b26 = block(26, spend=out[6]) b26.vtx[0].vin[0].scriptSig = b'\x00' b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. b26 = update_block(26, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b26 chain to make sure bitcoind isn't accepting b26 block(27, spend=out[7]) yield rejected(False) # Now try a too-large-coinbase script tip(15) b28 = block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() b28 = update_block(28, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b28 chain to make sure bitcoind isn't accepting b28 block(29, spend=out[7]) yield rejected(False) # b30 has a max-sized coinbase scriptSig.
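# (Editor's note: consensus requires the coinbase scriptSig to be between
# 2 and 100 bytes, which is why the 1-byte and 101-byte scripts above are
# both rejected with 'bad-cb-length' while b30's 100-byte script below is
# accepted.)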
tip(23) b30 = block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() b30 = update_block(30, []) yield accepted() save_spendable_output() # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY # # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b36 (11) # \-> b34 (10) # \-> b32 (9) # # MULTISIG: each op code counts as 20 sigops. To create the edge case, # pack another 19 sigops at the end. lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ( (MAX_BLOCK_SIGOPS_PER_MB - 1) // 20) + [OP_CHECKSIG] * 19) b31 = block(31, spend=out[8], script=lots_of_multisigs) assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS_PER_MB) yield accepted() save_spendable_output() # this goes over the limit because the coinbase has one sigop too_many_multisigs = CScript( [OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS_PER_MB // 20)) b32 = block(32, spend=out[9], script=too_many_multisigs) assert_equal(get_legacy_sigopcount_block( b32), MAX_BLOCK_SIGOPS_PER_MB + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKMULTISIGVERIFY tip(31) lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ( (MAX_BLOCK_SIGOPS_PER_MB - 1) // 20) + [OP_CHECKSIG] * 19) block(33, spend=out[9], script=lots_of_multisigs) yield accepted() save_spendable_output() too_many_multisigs = CScript( [OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB // 20)) block(34, spend=out[10], script=too_many_multisigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKSIGVERIFY tip(33) lots_of_checksigs = CScript( [OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) b35 = block(35, spend=out[10], script=lots_of_checksigs) yield accepted() save_spendable_output() too_many_checksigs = CScript( [OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB)) block(36, spend=out[11], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Check spending of a transaction in a block which failed to connect # # b6 (3) # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b37 (11) # \-> b38 (11/37) # # save 37's spendable output, but then double-spend out11 to invalidate # the block tip(35) b37 = block(37, spend=out[11]) txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) tx = create_and_sign_tx(out[11].tx, out[11].n, 0) b37 = update_block(37, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # attempt to spend b37's first non-coinbase tx, at which point b37 was # still considered valid tip(35) block(38, spend=txout_b37) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Check P2SH SigOp counting # # # 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12) # \-> b40 (12) # # b39 - create some P2SH outputs that will require 6 sigops to spend: # # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL # tip(35) b39 = block(39) b39_outputs = 0 b39_sigops_per_output = 6 # Build the redeem script, hash it, use hash to create the p2sh script redeem_script = CScript([self.coinbase_pubkey] + [ OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG]) redeem_script_hash = hash160(redeem_script) p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE # This must be signed because it is spending a coinbase spend = out[11] tx = create_tx(spend.tx, spend.n, 1, p2sh_script) 
tx.vout.append( CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE]))) self.sign_tx(tx, spend.tx, spend.n) tx.rehash() b39 = update_block(39, [tx]) b39_outputs += 1 # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest # to OP_TRUE tx_new = None tx_last = tx tx_last_n = len(tx.vout) - 1 total_size = len(b39.serialize()) while(total_size < LEGACY_MAX_BLOCK_SIZE): tx_new = create_tx(tx_last, tx_last_n, 1, p2sh_script) tx_new.vout.append( CTxOut(tx_last.vout[tx_last_n].nValue - 1, CScript([OP_TRUE]))) tx_new.rehash() total_size += len(tx_new.serialize()) if total_size >= LEGACY_MAX_BLOCK_SIZE: break b39.vtx.append(tx_new) # add tx to block tx_last = tx_new tx_last_n = len(tx_new.vout) - 1 b39_outputs += 1 b39 = update_block(39, []) yield accepted() save_spendable_output() # Test sigops in P2SH redeem scripts # # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops. # The first tx has one sigop and then at the end we add 2 more to put us just over the max. # # b41 does the same, less one, so it has the maximum sigops permitted. # tip(39) b40 = block(40, spend=out[12]) sigops = get_legacy_sigopcount_block(b40) numTxs = (MAX_BLOCK_SIGOPS_PER_MB - sigops) // b39_sigops_per_output assert_equal(numTxs <= b39_outputs, True) lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) lastAmount = b40.vtx[1].vout[0].nValue new_txs = [] for i in range(1, numTxs + 1): tx = CTransaction() tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) tx.vin.append(CTxIn(lastOutpoint, b'')) # second input is corresponding P2SH output from b39 tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) # Note: must pass the redeem_script (not p2sh_script) to the # signature hash function sighash = SignatureHashForkId( redeem_script, tx, 1, SIGHASH_ALL | SIGHASH_FORKID, lastAmount) sig = self.coinbase_key.sign( sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID])) scriptSig = CScript([sig, redeem_script]) tx.vin[1].scriptSig = scriptSig pad_tx(tx) tx.rehash() new_txs.append(tx) lastOutpoint = COutPoint(tx.sha256, 0) lastAmount = tx.vout[0].nValue b40_sigops_to_fill = MAX_BLOCK_SIGOPS_PER_MB - \ (numTxs * b39_sigops_per_output + sigops) + 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) pad_tx(tx) tx.rehash() new_txs.append(tx) update_block(40, new_txs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # same as b40, but one less sigop tip(39) block(41, spend=None) update_block(41, [b40tx for b40tx in b40.vtx[1:] if b40tx != tx]) b41_sigops_to_fill = b40_sigops_to_fill - 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) pad_tx(tx) update_block(41, [tx]) yield accepted() # Fork off of b39 to create a constant base again # # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) # \-> b41 (12) # tip(39) block(42, spend=out[12]) yield rejected() save_spendable_output() block(43, spend=out[13]) yield accepted() save_spendable_output() # Test a number of really invalid scenarios # # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) # \-> ??? (15) # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to # make sure this works. 
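# (Editor's note: the 0x207fffff assigned to nBits below is regtest's
# proof-of-work limit in compact form, so solve() only needs a trivial
# nonce search.)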
height = self.block_heights[self.tip.sha256] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() b44.nTime = self.tip.nTime + 1 b44.hashPrevBlock = self.tip.sha256 b44.nBits = 0x207fffff b44.vtx.append(coinbase) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() self.tip = b44 self.block_heights[b44.sha256] = height self.blocks[44] = b44 yield accepted() # A block with a non-coinbase as the first tx non_coinbase = create_tx(out[15].tx, out[15].n, 1) b45 = CBlock() b45.nTime = self.tip.nTime + 1 b45.hashPrevBlock = self.tip.sha256 b45.nBits = 0x207fffff b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() self.block_heights[b45.sha256] = self.block_heights[ self.tip.sha256] + 1 self.tip = b45 self.blocks[45] = b45 yield rejected(RejectResult(16, b'bad-cb-missing')) # A block with no txns tip(44) b46 = CBlock() b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1 self.tip = b46 assert 46 not in self.blocks self.blocks[46] = b46 s = ser_uint256(b46.hashMerkleRoot) yield rejected(RejectResult(16, b'bad-cb-missing')) # A block with invalid work tip(44) b47 = block(47, solve=False) target = uint256_from_compact(b47.nBits) while b47.sha256 < target: # changed > to < b47.nNonce += 1 b47.rehash() yield rejected(RejectResult(16, b'high-hash')) # A block with timestamp > 2 hrs in the future tip(44) b48 = block(48, solve=False) b48.nTime = int(time.time()) + 60 * 60 * 3 b48.solve() yield rejected(RejectResult(16, b'time-too-new')) # A block with an invalid merkle hash tip(44) b49 = block(49) b49.hashMerkleRoot += 1 b49.solve() yield rejected(RejectResult(16, b'bad-txnmrklroot')) # A block with an incorrect POW limit tip(44) b50 = block(50) b50.nBits = b50.nBits - 1 b50.solve() yield rejected(RejectResult(16, b'bad-diffbits')) # A block with two coinbase txns tip(44) b51 = block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) b51 = update_block(51, [cb2]) yield rejected(RejectResult(16, b'bad-tx-coinbase')) # A block w/ duplicate txns tip(44) b52 = block(52, spend=out[15]) b52 = update_block(52, [b52.vtx[1]]) yield rejected(RejectResult(16, b'tx-duplicate')) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) # tip(43) block(53, spend=out[14]) yield rejected() # rejected since b44 is at same height save_spendable_output() # invalid timestamp (b35 is 5 blocks back, so its time is # MedianTimePast) b54 = block(54, spend=out[15]) b54.nTime = b35.nTime - 1 b54.solve() yield rejected(RejectResult(16, b'time-too-old')) # valid timestamp tip(53) b55 = block(55, spend=out[15]) b55.nTime = b35.nTime update_block(55, []) yield accepted() save_spendable_output() # Test CVE-2012-2459 # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) # \-> b57 (16) # \-> b56p2 (16) # \-> b56 (16) # # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without # affecting the merkle root of a block, while still invalidating it. # See: src/consensus/merkle.h # # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx. # Result: OK # # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle # root but duplicate transactions. 
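# (Editor's sketch, illustrative and not part of the original test: the
# malleability exists because the merkle computation duplicates the last
# hash at any level with an odd number of entries, so the leaf lists
# [cb, tx, tx1] and [cb, tx, tx1, tx1] produce the same root. A minimal
# model of that duplicate-last behavior:)
from hashlib import sha256 as _sha256

def _hash256(b):
    # double SHA-256, as used for txids and merkle nodes
    return _sha256(_sha256(b).digest()).digest()

def _merkle_root_model(hashes):
    while len(hashes) > 1:
        if len(hashes) % 2:
            # duplicate-last step that CVE-2012-2459 abuses
            hashes.append(hashes[-1])
        hashes = [_hash256(hashes[i] + hashes[i + 1])
                  for i in range(0, len(hashes), 2)]
    return hashes[0]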
# Result: Fails # # b57p2 has six transactions in its merkle tree: # - coinbase, tx, tx1, tx2, tx3, tx4 # Merkle root calculation will duplicate as necessary. # Result: OK. # # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates # that the error was caught early, avoiding a DOS vulnerability.) # b57 - a good block with 2 txs, don't submit until end tip(55) b57 = block(57) tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) b57 = update_block(57, [tx, tx1]) # b56 - copy b57, add a duplicate tx tip(55) b56 = copy.deepcopy(b57) self.blocks[56] = b56 assert_equal(len(b56.vtx), 3) b56 = update_block(56, [b57.vtx[2]]) assert_equal(b56.hash, b57.hash) yield rejected(RejectResult(16, b'bad-txns-duplicate')) # b57p2 - a good block with 6 tx'es, don't submit until end tip(55) b57p2 = block("57p2") tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) tx2 = create_tx(tx1, 0, 1) tx3 = create_tx(tx2, 0, 1) tx4 = create_tx(tx3, 0, 1) b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4]) # b56p2 - copy b57p2, duplicate two non-consecutive tx's tip(55) b56p2 = copy.deepcopy(b57p2) self.blocks["b56p2"] = b56p2 assert_equal(len(b56p2.vtx), 6) b56p2 = update_block("b56p2", b56p2.vtx[4:6], reorder=False) assert_equal(b56p2.hash, b57p2.hash) yield rejected(RejectResult(16, b'bad-txns-duplicate')) tip("57p2") yield accepted() tip(57) yield rejected() # rejected because 57p2 seen first save_spendable_output() # Test a few invalid tx types # # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> ??? (17) # # tx with prevout.n out of range tip(57) b58 = block(58, spend=out[17]) tx = CTransaction() assert(len(out[17].tx.vout) < 42) tx.vin.append( CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) pad_tx(tx) tx.calc_sha256() b58 = update_block(58, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # tx with output value > input value out of range tip(57) b59 = block(59) tx = create_and_sign_tx(out[17].tx, out[17].n, 51 * COIN) b59 = update_block(59, [tx]) yield rejected(RejectResult(16, b'bad-txns-in-belowout')) # reset to good chain tip(57) b60 = block(60, spend=out[17]) yield accepted() save_spendable_output() # Test BIP30 # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b61 (18) # # Blocks are not allowed to contain a transaction whose id matches that of an earlier, # not-fully-spent transaction in the same chain. To test, make identical coinbases; # the second one should be rejected. 
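# (Editor's note: create_coinbase commits to the block height in the
# coinbase scriptSig, so two coinbases normally have distinct txids;
# copying b60's scriptSig below, with otherwise identical outputs, makes
# b61's coinbase byte-identical to b60's, as the assert_equal confirms.)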
# tip(60) b61 = block(61, spend=out[18]) b61.vtx[0].vin[0].scriptSig = b60.vtx[ 0].vin[0].scriptSig # equalize the coinbases b61.vtx[0].rehash() b61 = update_block(61, []) assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) yield rejected(RejectResult(16, b'bad-txns-BIP30')) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b62 (18) # tip(60) b62 = block(62) tx = CTransaction() tx.nLockTime = 0xffffffff # this locktime is non-final assert(out[18].n < len(out[18].tx.vout)) tx.vin.append( CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert(tx.vin[0].nSequence < 0xffffffff) tx.calc_sha256() b62 = update_block(62, [tx]) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) # Test a non-final coinbase is also rejected # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b63 (-) # tip(60) b63 = block(63) b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() b63 = update_block(63, []) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) # This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > LEGACY_MAX_BLOCK_SIZE with the bloated varint, but <= LEGACY_MAX_BLOCK_SIZE without the bloated varint, # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. # # What matters is that the receiving node should not reject the bloated block, and then reject the canonical # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) 
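# (Editor's sketch, an assumption since CBrokenBlock's serialization is
# truncated in this diff: a transaction count below 253 canonically
# serializes as a single CompactSize byte, but the same value can be
# bloated to nine bytes with a 0xff marker plus an 8-byte little-endian
# integer, which matches the LEGACY_MAX_BLOCK_SIZE + 8 assertion below.)
def _bloated_compact_size(n):
    import struct  # struct is already imported at the top of this file
    return struct.pack("<BQ", 255, n)  # nine bytes instead of one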
# # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) # \ # b64a (18) # b64a is a bloated block (non-canonical varint) # b64 is a good block (same as b64a but w/ canonical varint) # tip(60) regular_block = block("64a", spend=out[18]) # make it a "broken_block," with non-canonical serialization b64a = CBrokenBlock(regular_block) b64a.initialize(regular_block) self.blocks["64a"] = b64a self.tip = b64a tx = CTransaction() # use canonical serialization to calculate size script_length = LEGACY_MAX_BLOCK_SIZE - \ len(b64a.normal_serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) b64a = update_block("64a", [tx]) assert_equal(len(b64a.serialize()), LEGACY_MAX_BLOCK_SIZE + 8) yield TestInstance([[self.tip, None]]) # comptool workaround: to make sure b64 is delivered, manually erase # b64a from blockstore self.test.block_store.erase(b64a.sha256) tip(60) b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) assert_equal(len(b64.serialize()), LEGACY_MAX_BLOCK_SIZE) self.blocks[64] = b64 update_block(64, []) yield accepted() save_spendable_output() # Spend an output created in the block itself # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # tip(64) block(65) tx1 = create_and_sign_tx( out[19].tx, out[19].n, out[19].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 0) update_block(65, [tx1, tx2]) yield accepted() save_spendable_output() # Attempt to double-spend a transaction created in a block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b67 (20) # # tip(65) block(67) tx1 = create_and_sign_tx( out[20].tx, out[20].n, out[20].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 1) tx3 = create_and_sign_tx(tx1, 0, 2) update_block(67, [tx1, tx2, tx3]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # More tests of block subsidy # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b68 (20) # # b68 - coinbase with an extra 10 satoshis, # creates a tx that has 9 satoshis from out[20] go to fees # this fails because the coinbase is trying to claim 1 satoshi too much in fees # # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee # this succeeds # tip(65) block(68, additional_coinbase_value=10) tx = create_and_sign_tx( out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 9) update_block(68, [tx]) yield rejected(RejectResult(16, b'bad-cb-amount')) tip(65) b69 = block(69, additional_coinbase_value=10) tx = create_and_sign_tx( out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 10) update_block(69, [tx]) yield accepted() save_spendable_output() # Test spending the outpoint of a non-existent transaction # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b70 (21) # tip(69) block(70, spend=out[21]) bogus_tx = CTransaction() bogus_tx.sha256 = uint256_from_str( b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c") tx = CTransaction() tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) tx.vout.append(CTxOut(1, b"")) pad_tx(tx) tx.rehash() update_block(70, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) # # -> b53 (14) -> b55 (15) -> b57 (16)
-> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b71 (21) # # b72 is a good block. # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72. # tip(69) b72 = block(72) tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2) tx2 = create_and_sign_tx(tx1, 0, 1) b72 = update_block(72, [tx1, tx2]) # now tip is 72 b71 = copy.deepcopy(b72) # add duplicate last transaction b71.vtx.append(b72.vtx[-1]) # b71 builds off b69 self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 self.blocks[71] = b71 assert_equal(len(b71.vtx), 4) assert_equal(len(b72.vtx), 3) assert_equal(b72.sha256, b71.sha256) tip(71) yield rejected(RejectResult(16, b'bad-txns-duplicate')) tip(72) yield accepted() save_spendable_output() # Test some invalid scripts and MAX_BLOCK_SIGOPS_PER_MB # # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b** (22) # # b73 - tx with excessive sigops that are placed after an excessively large script element. # The purpose of the test is to make sure those sigops are counted. # # script is a bytearray of size 20,526 # # bytearray[0-19,998] : OP_CHECKSIG # bytearray[19,999] : OP_PUSHDATA4 # bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format) # bytearray[20,004-20,525]: unread data (script_element) # bytearray[20,526] : OP_CHECKSIG (this puts us over the limit) # tip(72) b73 = block(73) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + \ MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = int("4e", 16) # OP_PUSHDATA4 element_size = MAX_SCRIPT_ELEMENT_SIZE + 1 a[MAX_BLOCK_SIGOPS_PER_MB] = element_size % 256 a[MAX_BLOCK_SIGOPS_PER_MB + 1] = element_size // 256 a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0 a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0 tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b73 = update_block(73, [tx]) assert_equal( get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS_PER_MB + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # b74/75 - if we push an invalid script element, all previous sigops are counted, # but sigops after the element are not counted. # # The script element is invalid because its push_data indicates that # there will be a large amount of data (0xffffff bytes), but we only # provide a much smaller number. These bytes are CHECKSIGS so they would # cause b75 to fail for excessive sigops, if those bytes were counted.
# # b74 fails because we put MAX_BLOCK_SIGOPS_PER_MB+1 before the element # b75 succeeds because we put MAX_BLOCK_SIGOPS_PER_MB before the element # # tip(72) b74 = block(74) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + \ MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB] = 0x4e a[MAX_BLOCK_SIGOPS_PER_MB + 1] = 0xfe a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 4] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b74 = update_block(74, [tx]) yield rejected(RejectResult(16, b'bad-blk-sigops')) tip(72) b75 = block(75) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = 0x4e a[MAX_BLOCK_SIGOPS_PER_MB] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 1] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b75 = update_block(75, [tx]) yield accepted() save_spendable_output() # Check that if we push an element filled with CHECKSIGs, they are not # counted tip(75) b76 = block(76) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) b76 = update_block(76, [tx]) yield accepted() save_spendable_output() # Test transaction resurrection # # -> b77 (24) -> b78 (25) -> b79 (26) # \-> b80 (25) -> b81 (26) -> b82 (27) # # b78 creates a tx, which is spent in b79. After b82, both should be in mempool # # The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the # rather obscure reason that the Python signature code does not distinguish between # Low-S and High-S values (whereas the bitcoin code has custom code which does so); # as a result of which, the odds are 50% that the python code will use the right # value and the transaction will be accepted into the mempool. Until we modify the # test framework to support low-S signing, we are out of luck. # # To get around this issue, we construct transactions which are not signed and which # spend to OP_TRUE. If the standard-ness rules change, this test would need to be # updated. (Perhaps to spend to a P2SH OP_TRUE script) # tip(76) block(77) tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10 * COIN) update_block(77, [tx77]) yield accepted() save_spendable_output() block(78) tx78 = create_tx(tx77, 0, 9 * COIN) update_block(78, [tx78]) yield accepted() block(79) tx79 = create_tx(tx78, 0, 8 * COIN) update_block(79, [tx79]) yield accepted() # mempool should be empty assert_equal(len(self.nodes[0].getrawmempool()), 0) tip(77) block(80, spend=out[25]) yield rejected() save_spendable_output() block(81, spend=out[26]) yield rejected() # other chain is same length save_spendable_output() block(82, spend=out[27]) yield accepted() # now this chain is longer, triggers re-org save_spendable_output() # now check that tx78 and tx79 have been put back into the peer's # mempool mempool = self.nodes[0].getrawmempool() assert_equal(len(mempool), 2) assert(tx78.hash in mempool) assert(tx79.hash in mempool) # Test invalid opcodes in dead execution paths. 
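# (Editor's note: script validation only rejects OP_INVALIDOPCODE when the
# opcode is actually executed; a branch skipped by OP_IF must still parse,
# but its opcodes never run, which is what b83 exercises below.)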
# # -> b81 (26) -> b82 (27) -> b83 (28) # block(83) op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] script = CScript(op_codes) tx1 = create_and_sign_tx( out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) tx2.vin[0].scriptSig = CScript([OP_FALSE]) tx2.rehash() update_block(83, [tx1, tx2]) yield accepted() save_spendable_output() # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) # # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) # \-> b85 (29) -> b86 (30) \-> b89a (32) # # block(84) tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) vout_offset = len(tx1.vout) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.calc_sha256() self.sign_tx(tx1, out[29].tx, out[29].n) tx1.rehash() tx2 = create_tx(tx1, vout_offset, 0, CScript([OP_RETURN])) tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx3 = create_tx(tx1, vout_offset + 1, 0, CScript([OP_RETURN])) tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx4 = create_tx(tx1, vout_offset + 2, 0, CScript([OP_TRUE])) tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx5 = create_tx(tx1, vout_offset + 3, 0, CScript([OP_RETURN])) update_block(84, [tx1, tx2, tx3, tx4, tx5]) yield accepted() save_spendable_output() tip(83) block(85, spend=out[29]) yield rejected() block(86, spend=out[30]) yield accepted() tip(84) block(87, spend=out[30]) yield rejected() save_spendable_output() block(88, spend=out[31]) yield accepted() save_spendable_output() # trying to spend the OP_RETURN output is rejected block("89a", spend=out[32]) tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) update_block("89a", [tx]) yield rejected() # Test re-org of a week's worth of blocks (1088 blocks) # This test takes a minute or two and can be accomplished in memory # if self.options.runbarelyexpensive: tip(88) LARGE_REORG_SIZE = 1088 test1 = TestInstance(sync_every_block=False) spend = out[32] for i in range(89, LARGE_REORG_SIZE + 89): b = block(i, spend) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) b = update_block(i, [tx]) assert_equal(len(b.serialize()), LEGACY_MAX_BLOCK_SIZE) test1.blocks_and_transactions.append([self.tip, True]) save_spendable_output() spend = get_spendable_output() yield test1 chain1_tip = i # now create alt chain of same length tip(88) test2 = TestInstance(sync_every_block=False) for i in range(89, LARGE_REORG_SIZE + 89): block("alt" + str(i)) test2.blocks_and_transactions.append([self.tip, False]) yield test2 # extend alt chain to trigger re-org block("alt" + str(chain1_tip + 1)) yield accepted() # ... 
and re-org back to the first chain tip(chain1_tip) block(chain1_tip + 1) yield rejected() block(chain1_tip + 2) yield accepted() chain1_tip += 2 if __name__ == '__main__': FullBlockTest().main() diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py index 4027a751c..bf6364b45 100755 --- a/test/functional/feature_maxuploadtarget.py +++ b/test/functional/feature_maxuploadtarget.py @@ -1,184 +1,183 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - -''' -Test behavior of -maxuploadtarget. +"""Test behavior of -maxuploadtarget. * Verify that getdata requests for old blocks (>1week) are dropped if uploadtarget has been reached. * Verify that getdata requests for recent blocks are respected even if uploadtarget has been reached. * Verify that the upload counters are reset after 24 hours. -''' +""" + from collections import defaultdict import time from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE from test_framework.blocktools import mine_big_block from test_framework.messages import CInv, msg_getdata from test_framework.mininode import network_thread_start, P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class TestNode(P2PInterface): def __init__(self): super().__init__() self.block_receive_map = defaultdict(int) def on_inv(self, message): pass def on_block(self, message): message.block.calc_sha256() self.block_receive_map[message.block.sha256] += 1 class MaxUploadTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 # Start a node with maxuploadtarget of 200 MB (/24h) self.extra_args = [["-maxuploadtarget=200"]] # Cache for utxos, as the listunspent may take a long time later in the # test self.utxo_cache = [] def run_test(self): # Before we connect anything, we first set the time on the node # to be in the past, otherwise things break because the CNode # time counters can't be reset backward after initialization old_time = int(time.time() - 2 * 60 * 60 * 24 * 7) self.nodes[0].setmocktime(old_time) # Generate some old blocks self.nodes[0].generate(130) # p2p_conns[0] will only request old blocks # p2p_conns[1] will only request new blocks # p2p_conns[2] will test resetting the counters p2p_conns = [] for _ in range(3): p2p_conns.append(self.nodes[0].add_p2p_connection(TestNode())) network_thread_start() for p2pc in p2p_conns: p2pc.wait_for_verack() # Test logic begins here # Now mine a big block mine_big_block(self.nodes[0], self.utxo_cache) # Store the hash; we'll request this later big_old_block = self.nodes[0].getbestblockhash() old_block_size = self.nodes[0].getblock(big_old_block, True)['size'] big_old_block = int(big_old_block, 16) # Advance to two days ago self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24) # Mine one more block, so that the prior block looks old mine_big_block(self.nodes[0], self.utxo_cache) # We'll be requesting this new block too big_new_block = self.nodes[0].getbestblockhash() big_new_block = int(big_new_block, 16) # p2p_conns[0] will test what happens if we just keep requesting # the same big old block too many times (expect: disconnect) getdata_request = msg_getdata() getdata_request.inv.append(CInv(2, big_old_block)) max_bytes_per_day = 200 * 1024 * 1024 daily_buffer = 144 * LEGACY_MAX_BLOCK_SIZE
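# (Editor's note, illustrative arithmetic: the 200 MiB/day target is
# 209,715,200 bytes and the buffer reserves 144 blocks/day * 1,000,000
# bytes = 144,000,000, leaving about 65.7 MB, i.e. roughly 70 fetches of
# the ~0.9 MB big block before the target is exhausted.)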
max_bytes_available = max_bytes_per_day - daily_buffer success_count = max_bytes_available // old_block_size # 144MB will be reserved for relaying new blocks, so expect this to # succeed for ~70 tries. for i in range(success_count): p2p_conns[0].send_message(getdata_request) p2p_conns[0].sync_with_ping() assert_equal(p2p_conns[0].block_receive_map[big_old_block], i + 1) assert_equal(len(self.nodes[0].getpeerinfo()), 3) # At most a couple more tries should succeed (depending on how long # the test has been running so far). for i in range(3): p2p_conns[0].send_message(getdata_request) p2p_conns[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) self.log.info( "Peer 0 disconnected after downloading old block too many times") # Requesting the current block on p2p_conns[1] should succeed indefinitely, # even when over the max upload target. # We'll try 200 times getdata_request.inv = [CInv(2, big_new_block)] for i in range(200): p2p_conns[1].send_message(getdata_request) p2p_conns[1].sync_with_ping() assert_equal(p2p_conns[1].block_receive_map[big_new_block], i + 1) self.log.info("Peer 1 able to repeatedly download new block") # But if p2p_conns[1] tries for an old block, it gets disconnected # too. getdata_request.inv = [CInv(2, big_old_block)] p2p_conns[1].send_message(getdata_request) p2p_conns[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 1) self.log.info("Peer 1 disconnected after trying to download old block") self.log.info("Advancing system time on node to clear counters...") # If we advance the time by 24 hours, then the counters should reset, # and p2p_conns[2] should be able to retrieve the old block. self.nodes[0].setmocktime(int(time.time())) p2p_conns[2].sync_with_ping() p2p_conns[2].send_message(getdata_request) p2p_conns[2].sync_with_ping() assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1) self.log.info("Peer 2 able to download old block") self.nodes[0].disconnect_p2ps() # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 self.log.info("Restarting nodes with -whitelist=127.0.0.1") self.stop_node(0) self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) # Reconnect to self.nodes[0] self.nodes[0].add_p2p_connection(TestNode()) network_thread_start() self.nodes[0].p2p.wait_for_verack() # retrieve 20 blocks which should be enough to break the 1MB limit getdata_request.inv = [CInv(2, big_new_block)] for i in range(20): self.nodes[0].p2p.send_message(getdata_request) self.nodes[0].p2p.sync_with_ping() assert_equal( self.nodes[0].p2p.block_receive_map[big_new_block], i + 1) getdata_request.inv = [CInv(2, big_old_block)] self.nodes[0].p2p.send_and_ping(getdata_request) # node is still connected because of the whitelist assert_equal(len(self.nodes[0].getpeerinfo()), 1) self.log.info( "Peer still connected after trying to download old block (whitelisted)") if __name__ == '__main__': MaxUploadTest().main() diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py index 2f007f636..3c473fc16 100755 --- a/test/functional/feature_nulldummy.py +++ b/test/functional/feature_nulldummy.py @@ -1,121 +1,119 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test NULLDUMMY softfork. + +Connect to a single node. +Generate 2 blocks (save the coinbases for later). +Generate 427 more blocks. 
+[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. +[Policy] Check that non-NULLDUMMY transactions are rejected before activation. +[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. +[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. +""" import time from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import CTransaction, FromHex, ToHex from test_framework.mininode import network_thread_start from test_framework.script import CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)" def trueDummy(tx): scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): assert(len(i) == 0) newscript.append(b'\x51') else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) tx.rehash() -''' -This test is meant to exercise NULLDUMMY softfork. -Connect to a single node. -Generate 2 blocks (save the coinbases for later). -Generate 427 more blocks. -[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. -[Policy] Check that non-NULLDUMMY transactions are rejected before activation. -[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. -[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. -''' - - class NULLDUMMYTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [['-whitelist=127.0.0.1']] def run_test(self): self.address = self.nodes[0].getnewaddress() self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address])[ 'address'] network_thread_start() # Block 2 self.coinbase_blocks = self.nodes[0].generate(2) coinbase_txid = [] for i in self.coinbase_blocks: coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) # Block 429 self.nodes[0].generate(427) self.lastblockhash = self.nodes[0].getbestblockhash() self.tip = int("0x" + self.lastblockhash, 0) self.lastblockheight = 429 self.lastblocktime = int(time.time()) + 429 self.log.info( "Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") test1txs = [self.create_transaction( self.nodes[0], coinbase_txid[0], self.ms_address, 49)] txid1 = self.nodes[0].sendrawtransaction(ToHex(test1txs[0]), True) test1txs.append(self.create_transaction( self.nodes[0], txid1, self.ms_address, 48)) txid2 = self.nodes[0].sendrawtransaction(ToHex(test1txs[1]), True) self.block_submit(self.nodes[0], test1txs, True) self.log.info( "Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") test2tx = self.create_transaction( self.nodes[0], txid2, self.ms_address, 48) trueDummy(test2tx) assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, ToHex(test2tx), True) self.log.info( "Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") self.block_submit(self.nodes[0], [test2tx], True) def create_transaction(self, node, txid, to_address, amount): inputs = [{"txid": txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransactionwithwallet(rawtx) return FromHex(CTransaction(), 
signresult['hex']) def block_submit(self, node, txs, accept=False): block = create_block(self.tip, create_coinbase( self.lastblockheight + 1), self.lastblocktime + 1) block.nVersion = 4 for tx in txs: tx.rehash() block.vtx.append(tx) block.vtx = [block.vtx[0]] + \ sorted(block.vtx[1:], key=lambda tx: tx.get_id()) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) if (accept): assert_equal(node.getbestblockhash(), block.hash) self.tip = block.sha256 self.lastblockhash = block.hash self.lastblocktime += 1 self.lastblockheight += 1 else: assert_equal(node.getbestblockhash(), self.lastblockhash) if __name__ == '__main__': NULLDUMMYTest().main() diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index 86b369e30..28d6ab27a 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -1,230 +1,230 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test bitcoind with different proxy configuration. -''' Test plan: - Start bitcoinds with different proxy configurations - Use addnode to initiate connections - Verify that proxies are connected to, and the right connection command is given - Proxy configurations to test on bitcoind side: - `-proxy` (proxy everything) - `-onion` (proxy just onions) - `-proxyrandomize` Circuit randomization - Proxy configurations to test on proxy side: - support no authentication (other proxy) - support no authentication + user/pass authentication (Tor) - proxy on IPv6 - Create various proxies (as threads) - Create bitcoinds that connect to them - Manipulate the bitcoinds using addnode (onetry) and observe effects addnode connect to IPv4 addnode connect to IPv6 addnode connect to onion addnode connect to generic DNS name -''' +""" import os import socket from test_framework.netutil import test_ipv6_local from test_framework.socks5 import ( AddressType, Socks5Command, Socks5Configuration, Socks5Server, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, PORT_MIN, PORT_RANGE, ) RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports class ProxyTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 def setup_nodes(self): self.have_ipv6 = test_ipv6_local() # Create two proxies on different ports # ... one unauthenticated self.conf1 = Socks5Configuration() self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000)) self.conf1.unauth = True self.conf1.auth = False # ... one supporting authenticated and unauthenticated (Tor) self.conf2 = Socks5Configuration() self.conf2.addr = ( '127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000)) self.conf2.unauth = True self.conf2.auth = True if self.have_ipv6: # ...
one on IPv6 with similar configuration self.conf3 = Socks5Configuration() self.conf3.af = socket.AF_INET6 self.conf3.addr = ( '::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) self.conf3.unauth = True self.conf3.auth = True else: self.log.warning("Testing without local IPv6 support") self.serv1 = Socks5Server(self.conf1) self.serv1.start() self.serv2 = Socks5Server(self.conf2) self.serv2.start() if self.have_ipv6: self.serv3 = Socks5Server(self.conf3) self.serv3.start() # Note: proxies are not used to connect to local nodes # this is because the proxy to use is based on CService.GetNetwork(), # which return NET_UNROUTABLE for localhost args = [ ['-listen', '-proxy={}:{}'.format( self.conf1.addr[0], self.conf1.addr[1]), '-proxyrandomize=1'], ['-listen', '-proxy={}:{}'.format( self.conf1.addr[0], self.conf1.addr[1]), '-onion={}:{}'.format( self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=0'], ['-listen', '-proxy={}:{}'.format( self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=1'], [] ] if self.have_ipv6: args[3] = ['-listen', '-proxy=[{}]:{}'.format( self.conf3.addr[0], self.conf3.addr[1]), '-proxyrandomize=0', '-noonion'] self.add_nodes(self.num_nodes, extra_args=args) self.start_nodes() def node_test(self, node, proxies, auth, test_onion=True): rv = [] # Test: outgoing IPv4 connection through node node.addnode("15.61.23.23:1234", "onetry") cmd = proxies[0].queue.get() assert(isinstance(cmd, Socks5Command)) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, # even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"15.61.23.23") assert_equal(cmd.port, 1234) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if self.have_ipv6: # Test: outgoing IPv6 connection through node node.addnode( "[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry") cmd = proxies[1].queue.get() assert(isinstance(cmd, Socks5Command)) # Note: bitcoind's SOCKS5 implementation only sends atyp # DOMAINNAME, even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534") assert_equal(cmd.port, 5443) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if test_onion: # Test: outgoing onion connection through node node.addnode("bitcoinostk4e4re.onion:8333", "onetry") cmd = proxies[2].queue.get() assert(isinstance(cmd, Socks5Command)) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"bitcoinostk4e4re.onion") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) # Test: outgoing DNS name connection through node node.addnode("node.noumenon:8333", "onetry") cmd = proxies[3].queue.get() assert(isinstance(cmd, Socks5Command)) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"node.noumenon") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) return rv def run_test(self): # basic -proxy self.node_test( self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) # -proxy plus -onion self.node_test( self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) # -proxy plus -onion, -proxyrandomize rv = self.node_test( self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) # Check that credentials as used for -proxyrandomize connections are # unique 
credentials = set((x.username, x.password) for x in rv) assert_equal(len(credentials), len(rv)) if self.have_ipv6: # proxy on IPv6 localhost self.node_test( self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) def networks_dict(d): r = {} for x in d['networks']: r[x['name']] = x return r # test RPC getnetworkinfo n0 = networks_dict(self.nodes[0].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n0[net]['proxy'], '{}:{}'.format( self.conf1.addr[0], self.conf1.addr[1])) assert_equal(n0[net]['proxy_randomize_credentials'], True) assert_equal(n0['onion']['reachable'], True) n1 = networks_dict(self.nodes[1].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n1[net]['proxy'], '{}:{}'.format( self.conf1.addr[0], self.conf1.addr[1])) assert_equal(n1[net]['proxy_randomize_credentials'], False) assert_equal(n1['onion']['proxy'], '{}:{}'.format( self.conf2.addr[0], self.conf2.addr[1])) assert_equal(n1['onion']['proxy_randomize_credentials'], False) assert_equal(n1['onion']['reachable'], True) n2 = networks_dict(self.nodes[2].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n2[net]['proxy'], '{}:{}'.format( self.conf2.addr[0], self.conf2.addr[1])) assert_equal(n2[net]['proxy_randomize_credentials'], True) assert_equal(n2['onion']['reachable'], True) if self.have_ipv6: n3 = networks_dict(self.nodes[3].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n3[net]['proxy'], '[{}]:{}'.format( self.conf3.addr[0], self.conf3.addr[1])) assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) if __name__ == '__main__': ProxyTest().main() diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index 9776f7197..843d3e27c 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -1,525 +1,523 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the pruning code. -# -# Test pruning code -# ******** -# WARNING: -# This test uses 4GB of disk space. -# This test takes 30 mins or more (up to 2 hours) -# ******** +WARNING: +This test uses 4GB of disk space. +This test takes 30 mins or more (up to 2 hours) +""" import time import os from test_framework.blocktools import mine_big_block from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, sync_blocks, ) MIN_BLOCKS_TO_KEEP = 288 # Rescans start at the earliest block up to 2 hours before a key timestamp, so # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time. TIMESTAMP_WINDOW = 2 * 60 * 60 def calc_usage(blockdir): return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(blockdir + f)) / (1024. * 1024.) class PruneTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 # Create nodes 0 and 1 to mine. # Create node 2 to test pruning. 
self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"] # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create nodes 5 to test wallet in prune mode, but do not connect self.extra_args = [self.full_node_default_args, self.full_node_default_args, ["-maxreceivebuffer=20000", "-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-prune=550"]] def setup_network(self): self.setup_nodes() self.prunedir = os.path.join( self.nodes[2].datadir, 'regtest', 'blocks', '') connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[2]) connect_nodes(self.nodes[2], self.nodes[0]) connect_nodes(self.nodes[0], self.nodes[3]) connect_nodes(self.nodes[0], self.nodes[4]) sync_blocks(self.nodes[0:5]) def setup_nodes(self): self.add_nodes(self.num_nodes, self.extra_args, timewait=900) self.start_nodes() def create_big_chain(self): # Start by creating some coinbases we can spend later self.nodes[1].generate(200) sync_blocks(self.nodes[0:2]) self.nodes[0].generate(150) # Then mine enough full blocks to create more than 550MiB of data for i in range(645): mine_big_block(self.nodes[0], self.utxo_cache_0) sync_blocks(self.nodes[0:5]) def test_height_min(self): if not os.path.isfile(self.prunedir + "blk00000.dat"): raise AssertionError("blk00000.dat is missing, pruning too early") self.log.info("Success") self.log.info("Though we're already using more than 550MiB, current usage: {}".format( calc_usage(self.prunedir))) self.log.info( "Mining 25 more blocks should cause the first block file to be pruned") # Pruning doesn't run until we're allocating another chunk, 20 full # blocks past the height cutoff will ensure this for i in range(25): mine_big_block(self.nodes[0], self.utxo_cache_0) waitstart = time.time() while os.path.isfile(self.prunedir + "blk00000.dat"): time.sleep(0.1) if time.time() - waitstart > 30: raise AssertionError( "blk00000.dat not pruned when it should be") self.log.info("Success") usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) if (usage > 550): raise AssertionError("Pruning target not being met") def create_chain_with_staleblocks(self): # Create stale blocks in manageable sized chunks self.log.info( "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") for j in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects # Stopping node 0 also clears its mempool, so it doesn't have # node1's transactions to accidentally mine self.stop_node(0) self.start_node(0, extra_args=self.full_node_default_args) # Mine 24 blocks in node 1 for i in range(24): if j == 0: mine_big_block(self.nodes[1], self.utxo_cache_1) else: # Add node1's wallet transactions back to the mempool, to # avoid the mined blocks from being too small. 
self.nodes[1].resendwallettransactions() # tx's already in mempool from previous disconnects self.nodes[1].generate(1) # Reorg back with 25 block chain from node 0 for i in range(25): mine_big_block(self.nodes[0], self.utxo_cache_0) # Create connections in this order so both nodes can see the reorg # at the same time connect_nodes(self.nodes[1], self.nodes[0]) connect_nodes(self.nodes[2], self.nodes[0]) sync_blocks(self.nodes[0:3]) self.log.info("Usage can be over target because of high stale rate: {}".format( calc_usage(self.prunedir))) def reorg_test(self): # Node 1 will mine a 300 block chain starting 287 blocks back from Node # 0 and Node 2's tip. This will cause Node 2 to do a reorg requiring # 288 blocks of undo data to the reorg_test chain. Reboot node 1 to # clear its mempool (hopefully make the invalidate faster). Lower the # block max size so we don't keep mining all our big mempool # transactions (from disconnected blocks) self.stop_node(1) self.start_node(1, extra_args=[ "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1"]) height = self.nodes[1].getblockcount() self.log.info("Current block height: {}".format(height)) invalidheight = height - 287 badhash = self.nodes[1].getblockhash(invalidheight) self.log.info("Invalidating block {} at height {}".format( badhash, invalidheight)) self.nodes[1].invalidateblock(badhash) # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want. # So invalidate that fork as well, until we're on the same chain as # node 0/2 (but at an ancestor 288 blocks ago) mainchainhash = self.nodes[0].getblockhash(invalidheight - 1) curhash = self.nodes[1].getblockhash(invalidheight - 1) while curhash != mainchainhash: self.nodes[1].invalidateblock(curhash) curhash = self.nodes[1].getblockhash(invalidheight - 1) assert(self.nodes[1].getblockcount() == invalidheight - 1) self.log.info("New best height: {}".format( self.nodes[1].getblockcount())) # Reboot node1 to clear those giant tx's from mempool self.stop_node(1) self.start_node(1, extra_args=[ "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) self.log.info("Reconnect nodes") connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[2], self.nodes[1]) sync_blocks(self.nodes[0:3], timeout=120) self.log.info("Verify height on node 2: {}".format( self.nodes[2].getblockcount())) self.log.info("Usage possibly still high because of stale blocks in block files: {}".format( calc_usage(self.prunedir))) self.log.info( "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)") # Get node0's wallet transactions back in its mempool, to keep the # mined blocks from being too small. self.nodes[0].resendwallettransactions() for i in range(22): # This can be slow, so do this in multiple RPC calls to avoid # RPC timeouts.
# node 0 has many large tx's in its mempool from the disconnects self.nodes[0].generate(10) sync_blocks(self.nodes[0:3], timeout=300) usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) if (usage > 550): raise AssertionError("Pruning target not being met") return invalidheight, badhash def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_rpc_error( -1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) self.log.info( "Will need to redownload block {}".format(self.forkheight)) # Verify that we have enough history to reorg back to the fork point. # Although this is more than 288 blocks, because this chain was written # more recently and only its other 299 small and 220 large blocks are in # the block files after it, it's expected to still be retained. self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) first_reorg_height = self.nodes[2].getblockcount() curchainhash = self.nodes[2].getblockhash(self.mainchainheight) self.nodes[2].invalidateblock(curchainhash) goalbestheight = self.mainchainheight goalbesthash = self.mainchainhash2 # As of 0.10 the current block download logic is not able to reorg to # the original chain created in create_chain_with_stale_blocks because # it doesn't know of any peer that's on that chain from which to # redownload its missing blocks. Invalidate the reorg_test chain in # node 0 as well; it can successfully switch to the original chain # because it has all the block data. However it must mine enough blocks # to have a more-work chain than the reorg_test chain in order to # trigger node 2's block download logic. At this point node 2 is within # 288 blocks of the fork point so it will preserve its ability to # reorg. if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight self.log.info( "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {}".format( blocks_to_mine)) self.nodes[0].invalidateblock(curchainhash) assert(self.nodes[0].getblockcount() == self.mainchainheight) assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 self.log.info( "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") waitstart = time.time() while self.nodes[2].getblockcount() < goalbestheight: time.sleep(0.1) if time.time() - waitstart > 900: raise AssertionError("Node 2 didn't reorg to proper height") assert(self.nodes[2].getbestblockhash() == goalbesthash) # Verify we can now fetch the data for a block that was previously pruned assert(self.nodes[2].getblock( self.forkhash)["height"] == self.forkheight) def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode self.start_node(node_number) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) # now re-start in manual pruning mode self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=1"]) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) def height(index): if use_timestamp: return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW else: return index def prune(index, expected_ret=None): ret = node.pruneblockchain(height(index)) # Check the return value.
When use_timestamp is True, just check # that the return value is less than or equal to the expected # value, because when more than one block is generated per second, # a timestamp will not be granular enough to uniquely identify an # individual block. if expected_ret is None: expected_ret = index if use_timestamp: assert_greater_than(ret, 0) assert_greater_than(expected_ret + 1, ret) else: assert_equal(ret, expected_ret) def has_block(index): return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index)) # should not prune because chain tip of node 3 (995) < PruneAfterHeight # (1000) assert_raises_rpc_error( -1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) node.generate(6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # negative heights should raise an exception assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) # height=100 too low to prune first block file so this is a no-op prune(100) if not has_block(0): raise AssertionError( "blk00000.dat is missing when should still be there") # Does nothing node.pruneblockchain(height(0)) if not has_block(0): raise AssertionError( "blk00000.dat is missing when should still be there") # height=500 should prune first file prune(500) if has_block(0): raise AssertionError( "blk00000.dat is still there, should be pruned by now") if not has_block(1): raise AssertionError( "blk00001.dat is missing when should still be there") # height=650 should prune second file prune(650) if has_block(1): raise AssertionError( "blk00001.dat is still there, should be pruned by now") # height=1000 should not prune anything more, because tip-288 is in # blk00002.dat. prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) if not has_block(2): raise AssertionError( "blk00002.dat is missing when should still be there") # advance the tip so blk00002.dat and blk00003.dat can be pruned (the # last 288 blocks should now be in blk00004.dat) node.generate(288) prune(1000) if has_block(2): raise AssertionError( "blk00002.dat is still there, should be pruned by now") if has_block(3): raise AssertionError( "blk00003.dat is still there, should be pruned by now") # stop node, start back up with auto-prune at 550MB, make sure still # runs self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=550"]) self.log.info("Success") def wallet_test(self): # check that the pruning node's wallet is still in good shape self.log.info("Stop and start pruning node to trigger wallet rescan") self.stop_node(2) self.start_node( 2, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. self.log.info("Syncing node 5 to test wallet") connect_nodes(self.nodes[0], self.nodes[5]) nds = [self.nodes[0], self.nodes[5]] sync_blocks(nds, wait=5, timeout=300) self.stop_node(5) # stop and start to trigger rescan self.start_node( 5, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") def run_test(self): self.log.info( "Warning!
This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") self.log.info("Mining a big blockchain of 995 blocks") # Determine default relay fee self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] # Cache for utxos, as the listunspent may take a long time later in the # test self.utxo_cache_0 = [] self.utxo_cache_1 = [] self.create_big_chain() # Chain diagram key: # * blocks on main chain # +,&,$,@ blocks on other forks # X invalidated block # N1 Node 1 # # Start by mining a simple chain that all nodes have # N0=N1=N2 **...*(995) # stop manual-pruning node with 995 blocks self.stop_node(3) self.stop_node(4) self.log.info( "Check that we haven't started pruning yet because we're below PruneAfterHeight") self.test_height_min() # Extend this chain past the PruneAfterHeight # N0=N1=N2 **...*(1020) self.log.info( "Check that we'll exceed disk space target if we have a very high stale block rate") self.create_chain_with_staleblocks() # Disconnect N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0 # N1=N2 **...*+...+(1044) # N0 **...**...**(1045) # # reconnect nodes causing reorg on N1 and N2 # N1=N2 **...*(1020) *...**(1045) # \ # +...+(1044) # # repeat this process until you have 12 stale forks hanging off the # main chain on N1 and N2 # N0 *************************...***************************(1320) # # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) # \ \ \ # +...+(1044) &.. $...$(1319) # Save some current chain state for later use self.mainchainheight = self.nodes[2].getblockcount() # 1320 self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) self.log.info("Check that we can survive a 288 block reorg still") (self.forkheight, self.forkhash) = self.reorg_test() # (1033, ) # Now create a 288 block reorg by mining a longer chain on N1 # First disconnect N1 # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain # N1 **...*(1020) **...**(1032)X.. # \ # ++...+(1031)X.. # # Now mine 300 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@(1332) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # Reconnect nodes and mine 220 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # N2 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ *...**(1320) # \ \ # ++...++(1044) .. # # N0 ********************(1032) @@...@@@(1552) # \ # *...**(1320) self.log.info( "Test that we can rerequest a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # original main chain (*), but will require redownload of some blocks # In order to have a peer we think we can download from, must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: # N0 ********************(1032) **...****(1553) # \ # X@...@@@(1552) # # N2 **...*(1020) **...**(1032) **...****(1553) # \ \ # \ X@...@@@(1552) # \ # +.. 
# # N1 doesn't change because 1033 on main chain (*) is invalid self.log.info("Test manual pruning with block indices") self.manual_test(3, use_timestamp=False) self.log.info("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) self.log.info("Test wallet re-scan") self.wallet_test() self.log.info("Done") if __name__ == '__main__': PruneTest().main() diff --git a/test/functional/feature_reindex.py b/test/functional/feature_reindex.py index fcef4ef21..bbe162d19 100755 --- a/test/functional/feature_reindex.py +++ b/test/functional/feature_reindex.py @@ -1,41 +1,44 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test running bitcoind with -reindex and -reindex-chainstate options. + +- Start a single node and generate 3 blocks. +- Stop the node and restart it with -reindex. Verify that the node has reindexed up to block 3. +- Stop the node and restart it with -reindex-chainstate. Verify that the node has reindexed up to block 3. +""" -# -# Test -reindex and -reindex-chainstate with CheckBlockIndex -# import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class ReindexTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def reindex(self, justchainstate=False): self.nodes[0].generate(3) blockcount = self.nodes[0].getblockcount() self.stop_nodes() extra_args = [ ["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]] self.start_nodes(extra_args) while self.nodes[0].getblockcount() < blockcount: time.sleep(0.1) assert_equal(self.nodes[0].getblockcount(), blockcount) self.log.info("Success") def run_test(self): self.reindex(False) self.reindex(True) self.reindex(False) self.reindex(True) if __name__ == '__main__': ReindexTest().main() diff --git a/test/functional/interface_http.py b/test/functional/interface_http.py index 54d3652b8..88fec3cd6 100755 --- a/test/functional/interface_http.py +++ b/test/functional/interface_http.py @@ -1,175 +1,172 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# -# Test rpc http basics -# +"""Test the RPC HTTP basics.""" import http.client import urllib.parse from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, str_to_b64str class HTTPBasicsTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 3 def setup_network(self): self.extra_args = [["-rpccorsdomain=null"], [], []] self.setup_nodes() def run_test(self): # # lowlevel check for http persistent connection # # url = urllib.parse.urlparse(self.nodes[0].url) authpair = url.username + ':' + url.password headers = {"Authorization": "Basic " + str_to_b64str(authpair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) assert(conn.sock != None) # according to http/1.1 connection must still be open! 
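# A note on these socket checks (stdlib behaviour, nothing
# bitcoind-specific): http.client only drops conn.sock once it sees a
# "Connection: close" response header or EOF, so a non-None conn.sock
# after a completed request is a reasonable proxy for the server keeping
# the HTTP/1.1 connection alive.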
# send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) # must also respond with a correct json-rpc message assert(conn.sock != None) # according to http/1.1 connection must still be open! conn.close() # the same should happen if we add keep-alive, because that is the # standard behaviour headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) assert(conn.sock != None) # according to http/1.1 connection must still be open! # send 2nd request without closing connection conn.request('POST', '/', '{"method": "getchaintips"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) # must also respond with a correct json-rpc message assert(conn.sock != None) # according to http/1.1 connection must still be open! conn.close() # now do the same with "Connection: close" headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "close"} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) assert(conn.sock == None) # now the connection must be closed after the response # node1 (2nd node) is running with disabled keep-alive option urlNode1 = urllib.parse.urlparse(self.nodes[1].url) authpair = urlNode1.username + ':' + urlNode1.password headers = {"Authorization": "Basic " + str_to_b64str(authpair)} conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) # node2 (third node) is running with standard keep-alive parameters # which means keep-alive is on urlNode2 = urllib.parse.urlparse(self.nodes[2].url) authpair = urlNode2.username + ':' + urlNode2.password headers = {"Authorization": "Basic " + str_to_b64str(authpair)} conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse().read() assert(b'"error":null' in out1) assert(conn.sock != None) # connection must still be open because bitcoind uses # keep-alive by default # Check excessive request size conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) conn.connect() conn.request('GET', '/' + ('x' * 1000), '', headers) out1 = conn.getresponse() assert_equal(out1.status, http.client.NOT_FOUND) conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) conn.connect() conn.request('GET', '/' + ('x' * 10000), '', headers) out1 = conn.getresponse() assert_equal(out1.status, http.client.BAD_REQUEST) # Check Standard CORS request origin = "null" conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() authpair = url.username + ':' + url.password headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Origin": origin} conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse() assert_equal(out1.status, http.client.OK) assert_equal(out1.headers["Access-Control-Allow-Origin"], origin) assert_equal(out1.headers["Access-Control-Allow-Credentials"], "true")
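# Context for these CORS header checks, stated informally: the server
# echoes the allowed Origin back, opts in to credentialed requests, and
# must explicitly expose non-safelisted response headers (such as
# WWW-Authenticate, checked next) before browser scripts may read them.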
assert_equal(out1.headers["Access-Control-Expose-Headers"], "WWW-Authenticate") assert(b'"error":null' in out1.read()) # Check Pre-flight CORS request corsheaders = {"Origin": origin, "Access-Control-Request-Method": "POST"} conn.request('OPTIONS', '/', None, corsheaders) out1 = conn.getresponse() assert_equal(out1.status, http.client.OK) assert_equal(out1.headers["Access-Control-Allow-Origin"], origin) assert_equal(out1.headers["Access-Control-Allow-Credentials"], "true") assert_equal(out1.headers["Access-Control-Allow-Methods"], "POST") assert_equal(out1.headers["Access-Control-Allow-Headers"], "authorization,content-type") assert_equal(b'', out1.read()) # Check Standard CORS request to node without CORS, expected failure conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port) conn.connect() authpair = url.username + ':' + url.password headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Origin": origin} conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) out1 = conn.getresponse() assert_equal(out1.status, http.client.UNAUTHORIZED) assert_equal(b'', out1.read()) # Check Pre-flight CORS request to node without CORS, expected failure corsheaders = {"Origin": origin, "Access-Control-Request-Method": "POST"} conn.request('OPTIONS', '/', None, corsheaders) out1 = conn.getresponse() assert_equal(out1.status, http.client.METHOD_NOT_ALLOWED) assert_equal(b'JSONRPC server handles only POST requests', out1.read()) if __name__ == '__main__': HTTPBasicsTest().main() diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index 517fe9d40..55f3644ab 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -1,398 +1,395 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -# -# Test REST interface -# +"""Test the REST API.""" from codecs import encode from decimal import Decimal import http.client from io import BytesIO import json from struct import pack, unpack import urllib.parse from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, connect_nodes_bi, hex_str_to_bytes, ) def deser_uint256(f): r = 0 for i in range(8): t = unpack(b"<I", f.read(4))[0] r += t << (i * 32) return r assert(tx["confirmations"] > 0) # Use invalidateblock to re-org back; all transactions should # end up unconfirmed and back in the mempool for node in self.nodes: node.invalidateblock(blocks[0]) # All txns should be back in the mempool, unconfirmed assert_equal( set(self.nodes[0].getrawmempool()), set(spends1_id + spends2_id)) for txid in spends1_id + spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] == 0) # Generate another block, they should all get mined self.nodes[0].generate(1) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id + spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) if __name__ == '__main__': MempoolCoinbaseTest().main() diff --git a/test/functional/mempool_spend_coinbase.py b/test/functional/mempool_spend_coinbase.py index 48e4e63ef..fe866ab6a 100755 --- a/test/functional/mempool_spend_coinbase.py +++ b/test/functional/mempool_spend_coinbase.py @@ -1,58 +1,57 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# -# Test spending coinbase transactions. -# The coinbase transaction in block N can appear in block -# N+100... so is valid in the mempool when the best block -# height is N+99. -# This test makes sure coinbase spends that will be mature -# in the next block are accepted into the memory pool, -# but less mature coinbase spends are NOT. -# +"""Test spending coinbase transactions. + +A spend of the coinbase transaction in block N can appear in +block N+100... so it is valid in the mempool when the best block +height is N+99. +This test makes sure coinbase spends that will be mature +in the next block are accepted into the memory pool, +but less mature coinbase spends are NOT. +""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, create_tx # Create one-input, one-output, no-fee transaction: class MempoolSpendCoinbaseTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [["-checkmempool"]] def run_test(self): chain_height = self.nodes[0].getblockcount() assert_equal(chain_height, 200) node0_address = self.nodes[0].getnewaddress() # Coinbase at height chain_height-100+1 ok in mempool, should # get mined. Coinbase at height chain_height-100+2 is # too immature to spend.
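# Illustrative sketch of the maturity rule described above (hypothetical
# local helper, not part of the test framework): the coinbase of block H
# can be spent in block H + 100, so the spend is mempool-acceptable once
# the tip is at height H + 99.
def coinbase_spend_is_mature(coinbase_height, tip_height, maturity=100):
    # the spend would be mined at tip_height + 1
    return tip_height + 1 >= coinbase_height + maturity

# With chain_height == 200: mature for the coinbase at height 101
# (201 >= 201), not yet for the one at height 102 (201 < 202).
assert coinbase_spend_is_mature(101, chain_height)
assert not coinbase_spend_is_mature(102, chain_height)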
b = [self.nodes[0].getblockhash(n) for n in range(101, 103)] coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b] spends_raw = [create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids] spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0]) # coinbase at height 102 should be too immature to spend assert_raises_rpc_error(-26, "bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1]) # mempool should have just spend_101: assert_equal(self.nodes[0].getrawmempool(), [spend_101_id]) # mine a block, spend_101 should get confirmed self.nodes[0].generate(1) assert_equal(set(self.nodes[0].getrawmempool()), set()) # ... and now height 102 can be spent: spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1]) assert_equal(self.nodes[0].getrawmempool(), [spend_102_id]) if __name__ == '__main__': MempoolSpendCoinbaseTest().main() diff --git a/test/functional/mining_getblocktemplate_longpoll.py b/test/functional/mining_getblocktemplate_longpoll.py index 9c0ea4784..2ca7f6a8d 100755 --- a/test/functional/mining_getblocktemplate_longpoll.py +++ b/test/functional/mining_getblocktemplate_longpoll.py @@ -1,86 +1,87 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test longpolling with getblocktemplate.""" from decimal import Decimal import threading from test_framework.test_framework import BitcoinTestFramework from test_framework.util import get_rpc_proxy, random_transaction class LongpollThread(threading.Thread): def __init__(self, node): threading.Thread.__init__(self) # query current longpollid templat = node.getblocktemplate() self.longpollid = templat['longpollid'] # create a new connection to the node, we can't use the same # connection from two threads self.node = get_rpc_proxy( node.url, 1, timeout=600, coveragedir=node.coverage_dir) def run(self): self.node.getblocktemplate({'longpollid': self.longpollid}) class GetBlockTemplateLPTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def run_test(self): self.log.info( "Warning: this test will take about 70 seconds in the best case. 
Be patient.") self.nodes[0].generate(10) templat = self.nodes[0].getblocktemplate() longpollid = templat['longpollid'] # longpollid should not change between successive invocations if # nothing else happens templat2 = self.nodes[0].getblocktemplate() assert(templat2['longpollid'] == longpollid) # Test 1: test that the longpolling wait if we do nothing thr = LongpollThread(self.nodes[0]) thr.start() # check that thread still lives # wait 5 seconds or until thread exits thr.join(5) assert(thr.is_alive()) # Test 2: test that longpoll will terminate if another node generates a block # generate a block on another node self.nodes[1].generate(1) # check that thread will exit now that new transaction entered mempool # wait 5 seconds or until thread exits thr.join(5) assert(not thr.is_alive()) # Test 3: test that longpoll will terminate if we generate a block # ourselves thr = LongpollThread(self.nodes[0]) thr.start() # generate a block on another node self.nodes[0].generate(1) # wait 5 seconds or until thread exits thr.join(5) assert(not thr.is_alive()) # Test 4: test that introducing a new transaction into the mempool will # terminate the longpoll thr = LongpollThread(self.nodes[0]) thr.start() # generate a random transaction and submit it min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"] # min_relay_fee is fee per 1000 bytes, which should be more than enough. (txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20) # after one minute, every 10 seconds the mempool is probed, so in 80 # seconds it should have returned thr.join(60 + 20) assert(not thr.is_alive()) if __name__ == '__main__': GetBlockTemplateLPTest().main() diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py index a9227e7d5..a7243a565 100755 --- a/test/functional/mining_prioritisetransaction.py +++ b/test/functional/mining_prioritisetransaction.py @@ -1,169 +1,166 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# -# Test PrioritiseTransaction code -# +"""Test the prioritisetransaction mining RPC.""" import time from test_framework.blocktools import ( create_confirmed_utxos, send_big_transactions, ) # FIXME: review how this test needs to be adapted w.r.t _LEGACY_MAX_BLOCK_SIZE from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE from test_framework.messages import COIN from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class PrioritiseTransactionTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-printpriority=1"], ["-printpriority=1"]] def run_test(self): self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] utxo_count = 90 utxos = create_confirmed_utxos(self.nodes[0], utxo_count) txids = [] # Create 3 batches of transactions at 3 different fee rate levels range_size = utxo_count // 3 for i in range(3): txids.append([]) start_range = i * range_size end_range = start_range + range_size txids[i] = send_big_transactions(self.nodes[0], utxos[start_range:end_range], end_range - start_range, 10 * (i + 1)) # Make sure that the size of each group of transactions exceeds # LEGACY_MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create # more transactions. 
mempool = self.nodes[0].getrawmempool(True) sizes = [0, 0, 0] for i in range(3): for j in txids[i]: assert(j in mempool) sizes[i] += mempool[j]['size'] # Fail => raise utxo_count assert(sizes[i] > LEGACY_MAX_BLOCK_SIZE) # add a fee delta to something in the cheapest bucket and make sure it gets mined # also check that a different entry in the cheapest bucket is NOT mined (lower # the priority to ensure it's not mined due to priority) self.nodes[0].prioritisetransaction( txids[0][0], 0, 100 * self.nodes[0].calculate_fee_from_txid(txids[0][0])) self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0) self.nodes[0].generate(1) mempool = self.nodes[0].getrawmempool() self.log.info("Assert that prioritised transaction was mined") assert(txids[0][0] not in mempool) assert(txids[0][1] in mempool) confirmed_transactions = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['tx'] # Pull the highest fee-rate transaction from a block high_fee_tx = confirmed_transactions[1] # Something high-fee should have been mined! assert(high_fee_tx != None) # Add a prioritisation before a tx is in the mempool (de-prioritising a # high-fee transaction so that it's now low fee). # # NOTE WELL: gettransaction returns the fee as a negative number and # as fractional coins. However, prioritisetransaction expects a # number of satoshi to add or subtract from the actual fee. # Thus the conversion here is simply int(tx_fee*COIN) to remove all fees, and then # we add the minimum fee back. tx_fee = self.nodes[0].gettransaction(high_fee_tx)['fee'] self.nodes[0].prioritisetransaction( high_fee_tx, -1e15, int(tx_fee*COIN) + self.nodes[0].calculate_fee_from_txid(high_fee_tx)) # Add everything back to mempool self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Check to make sure our high fee rate tx is back in the mempool mempool = self.nodes[0].getrawmempool() assert(high_fee_tx in mempool) # Now verify the modified-high feerate transaction isn't mined before # the other high fee transactions. Keep mining until our mempool has # decreased by all the high fee size that we calculated above. while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]): self.nodes[0].generate(1) # High fee transaction should not have been mined, but other high fee rate # transactions should have been. mempool = self.nodes[0].getrawmempool() self.log.info( "Assert that de-prioritised transaction is still in mempool") assert(high_fee_tx in mempool) for x in txids[2]: if (x != high_fee_tx): assert(x not in mempool) # Create a free, low priority transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent() assert(len(utxo_list) > 0) utxo = utxo_list[0] inputs = [] outputs = {} inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]}) outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"] txid = self.nodes[0].sendrawtransaction(tx_hex) # A tx that spends an in-mempool tx has 0 priority, so we can use it to # test the effect of using prioritise transaction for mempool # acceptance inputs = [] inputs.append({"txid": txid, "vout": 0}) outputs = {} outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs) tx2_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx2)["hex"] tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"] # This will raise an exception due to min relay fee not being met assert_raises_rpc_error(-26, "66: insufficient priority", self.nodes[0].sendrawtransaction, tx2_hex) assert(tx2_id not in self.nodes[0].getrawmempool()) # This is a less than 1000-byte transaction, so just set the fee # to be the minimum for a 1000 byte transaction and check that it is # accepted. self.nodes[0].prioritisetransaction( tx2_id, 0, int(self.relayfee * COIN)) self.log.info( "Assert that prioritised free transaction is accepted to mempool") assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id) assert(tx2_id in self.nodes[0].getrawmempool()) # Test that calling prioritisetransaction is sufficient to trigger # getblocktemplate to (eventually) return a new block. mock_time = int(time.time()) self.nodes[0].setmocktime(mock_time) template = self.nodes[0].getblocktemplate() self.nodes[0].prioritisetransaction( tx2_id, 0, -int(self.relayfee * COIN)) self.nodes[0].setmocktime(mock_time + 10) new_template = self.nodes[0].getblocktemplate() assert(template != new_template) if __name__ == '__main__': PrioritiseTransactionTest().main() diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index 0b7fdf491..4b02b977b 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -1,934 +1,932 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test compact blocks (BIP 152). 
+ +Only testing Version 1 compact blocks (txids) +""" import random from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import ( BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, HeaderAndShortIDs, msg_block, msg_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ToHex, ) from test_framework.mininode import ( mininode_lock, network_thread_start, P2PInterface, ) from test_framework.script import CScript, OP_TRUE from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal, sync_blocks, wait_until -''' -CompactBlocksTest -- test compact blocks (BIP 152) - -Only testing Version 1 compact blocks (txids) -''' - # TestNode: A peer we use to send messages to bitcoind, and store responses. class TestNode(P2PInterface): def __init__(self): super().__init__() self.last_sendcmpct = [] self.block_announced = False # Store the hashes of blocks we've seen announced. # This is for synchronizing the p2p message traffic, # so we can eg wait until a particular block is announced. self.announced_blockhashes = set() def on_sendcmpct(self, message): self.last_sendcmpct.append(message) def on_cmpctblock(self, message): self.block_announced = True self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() self.announced_blockhashes.add( self.last_message["cmpctblock"].header_and_shortids.header.sha256) def on_headers(self, message): self.block_announced = True for x in self.last_message["headers"].headers: x.calc_sha256() self.announced_blockhashes.add(x.sha256) def on_inv(self, message): for x in self.last_message["inv"].inv: if x.type == 2: self.block_announced = True self.announced_blockhashes.add(x.hash) # Requires caller to hold mininode_lock def received_block_announcement(self): return self.block_announced def clear_block_announcement(self): with mininode_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) self.last_message.pop("cmpctblock", None) def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock) self.clear_block_announcement() # Block until a block announcement for a particular block hash is # received. def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): return (block_hash in self.announced_blockhashes) wait_until(received_hash, timeout=timeout, lock=mininode_lock) def send_await_disconnect(self, message, timeout=30): """Sends a message to the node and wait for disconnect. 
This is used when we want to send a message into the node that we expect will get us disconnected, eg an invalid block.""" self.send_message(message) wait_until(lambda: self.state != "connected", timeout=timeout, lock=mininode_lock) class CompactBlocksTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [[], ["-txindex"]] self.utxos = [] def build_block_on_tip(self, node): height = node.getblockcount() tip = node.getbestblockhash() mtp = node.getblockheader(tip)['mediantime'] block = create_block( int(tip, 16), create_coinbase(height + 1), mtp + 1) block.nVersion = 4 block.solve() return block # Create 10 more anyone-can-spend utxo's for testing. def make_utxos(self): # Doesn't matter which node we use, just use node0. block = self.build_block_on_tip(self.nodes[0]) self.test_node.send_and_ping(msg_block(block)) assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256) self.nodes[0].generate(100) total_value = block.vtx[0].vout[0].nValue out_value = total_value // 10 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) for i in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() block2 = self.build_block_on_tip(self.nodes[0]) block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() self.test_node.send_and_ping(msg_block(block2)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) return # Test "sendcmpct" (between peers preferring the same version): # - No compact block announcements unless sendcmpct is sent. # - If sendcmpct is sent with version > preferred_version, the message is ignored. # - If sendcmpct is sent with boolean 0, then block announcements are not # made with compact blocks. # - If sendcmpct is then sent with boolean 1, then new block announcements # are made with compact blocks. # If old_node is passed in, request compact blocks with version=preferred-1 # and verify that it receives block announcements via compact block. def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): return (len(test_node.last_sendcmpct) > 0) wait_until(received_sendcmpct, timeout=30, lock=mininode_lock) with mininode_lock: # Check that the first version received is the preferred one assert_equal( test_node.last_sendcmpct[0].version, preferred_version) # And that we receive versions down to 1. assert_equal(test_node.last_sendcmpct[-1].version, 1) test_node.last_sendcmpct = [] tip = int(node.getbestblockhash(), 16) def check_announcement_of_new_block(node, peer, predicate): peer.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) peer.wait_for_block_announcement(block_hash, timeout=30) assert(peer.block_announced) with mininode_lock: assert predicate(peer), ( "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None))) # We shouldn't get any block announcements via cmpctblock yet. check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Try one more time, this time after requesting headers. 
test_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. # Before each test, sync the headers chain. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version sendcmpct = msg_sendcmpct() sendcmpct.version = preferred_version + 1 sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with valid version, but announce=False sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Finally, try a SENDCMPCT message with announce=True sendcmpct.version = preferred_version sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time (no headers sync should be needed!) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after turning on sendheaders test_node.send_and_ping(msg_sendheaders()) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Try one more time, after sending a version-1, announce=false message. sendcmpct.version = preferred_version - 1 sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" in p.last_message) # Now turn off announcements sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message) if old_node is not None: # Verify that a peer using an older protocol version can receive # announcements from this node. sendcmpct.version = 1 sendcmpct.announce = True old_node.send_and_ping(sendcmpct) # Header sync old_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, old_node, lambda p: "cmpctblock" in p.last_message) # This test actually causes bitcoind to (reasonably!) disconnect us, so do # this last. def test_invalid_cmpctblock_message(self): self.nodes[0].generate(101) block = self.build_block_on_tip(self.nodes[0]) cmpct_block = P2PHeaderAndShortIDs() cmpct_block.header = CBlockHeader(block) cmpct_block.prefilled_txn_length = 1 # This index will be too high prefilled_txn = PrefilledTransaction(1, block.vtx[0]) cmpct_block.prefilled_txn = [prefilled_txn] self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block)) assert_equal( int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock) # Compare the generated shortids to what we expect based on BIP 152, given # bitcoind's choice of nonce. def test_compactblock_construction(self, node, test_node): # Generate a bunch of transactions.
node.generate(101) num_transactions = 25 address = node.getnewaddress() for i in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) # Wait until we've seen the block announcement for the resulting tip tip = int(node.getbestblockhash(), 16) test_node.wait_for_block_announcement(tip) # Make sure we will receive a fast-announce compact block self.request_cb_announcements(test_node, node) # Now mine a block, and look at the resulting compact block. test_node.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) # Store the raw block in our internal format. block = FromHex(CBlock(), node.getblock( "{:02x}".format(block_hash), False)) for tx in block.vtx: tx.calc_sha256() block.rehash() # Wait until the block was announced (via compact blocks) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) # Now fetch the compact block using a normal non-announce getdata with mininode_lock: test_node.clear_block_announcement() inv = CInv(4, block_hash) # 4 == "CompactBlock" test_node.send_message(msg_getdata([inv])) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert("cmpctblock" in test_node.last_message) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) def check_compactblock_construction_from_block(self, header_and_shortids, block_hash, block): # Check that we got the right block! header_and_shortids.header.calc_sha256() assert_equal(header_and_shortids.header.sha256, block_hash) # Make sure the prefilled_txn appears to have included the coinbase assert(len(header_and_shortids.prefilled_txn) >= 1) assert_equal(header_and_shortids.prefilled_txn[0].index, 0) # Check that all prefilled_txn entries match what's in the block. for entry in header_and_shortids.prefilled_txn: entry.tx.calc_sha256() # This checks the tx agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) # Check that the cmpctblock message announced all the transactions. assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) # And now check that all the shortids are as expected as well. # Determine the siphash keys to use. 
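# Sketch of the BIP 152 key derivation that get_siphash_keys() performs
# (a hedged restatement, not the framework code itself): single SHA256
# over the serialized 80-byte header followed by the 8-byte little-endian
# nonce; k0 and k1 are the first two little-endian 64-bit words of that
# hash. calculate_shortid() then computes SipHash-2-4(k0, k1, txid)
# truncated to 6 bytes.
import hashlib
import struct

def derive_siphash_keys_sketch(header_bytes, nonce):
    h = hashlib.sha256(header_bytes + struct.pack("<Q", nonce)).digest()
    k0 = struct.unpack("<Q", h[0:8])[0]
    k1 = struct.unpack("<Q", h[8:16])[0]
    return k0, k1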
[k0, k1] = header_and_shortids.get_siphash_keys() index = 0 while index < len(block.vtx): if (len(header_and_shortids.prefilled_txn) > 0 and header_and_shortids.prefilled_txn[0].index == index): # Already checked prefilled transactions above header_and_shortids.prefilled_txn.pop(0) else: tx_hash = block.vtx[index].sha256 shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) index += 1 # Test that bitcoind requests compact blocks when we announce new blocks # via header or inv, and that responding to getblocktxn causes the block # to be successfully reconstructed. def test_compactblock_requests(self, node, test_node, version): # Try announcing a block with an inv or header, expect a compactblock # request for announce in ["inv", "header"]: block = self.build_block_on_tip(node) with mininode_lock: test_node.last_message.pop("getdata", None) if announce == "inv": test_node.send_message(msg_inv([CInv(2, block.sha256)])) wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock) test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert_equal(test_node.last_message["getdata"].inv[0].type, 4) assert_equal( test_node.last_message["getdata"].inv[0].hash, block.sha256) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() comp_block.header = CBlockHeader(block) comp_block.nonce = 0 [k0, k1] = comp_block.get_siphash_keys() coinbase_hash = block.vtx[0].sha256 comp_block.shortids = [ calculate_shortid(k0, k1, coinbase_hash)] test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with mininode_lock: assert("getblocktxn" in test_node.last_message) absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, [0]) # should be a coinbase request # Send the coinbase, and verify that the tip advances. msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] test_node.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) # Create a chain of transactions from given utxo, and add to a new block. # Note that num_transactions is number of transactions not including the # coinbase. def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) for i in range(num_transactions): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) pad_tx(tx) tx.rehash() utxo = [tx.sha256, 0, tx.vout[0].nValue] block.vtx.append(tx) ordered_txs = block.vtx block.vtx = [block.vtx[0]] + \ sorted(block.vtx[1:], key=lambda tx: tx.get_id()) block.hashMerkleRoot = block.calc_merkle_root() block.solve() return block, ordered_txs # Test that we only receive getblocktxn requests for transactions that the # node needs, and that responding to them causes the block to be # reconstructed. 
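# Sketch of BIP 152's differential index encoding, which is what
# BlockTransactionsRequest.from_absolute()/to_absolute() implement
# (assumed semantics): each index is stored as the gap since the previous
# index plus one, so absolute [1, 2, 3, 4, 5] is sent as [1, 0, 0, 0, 0].
def from_absolute_sketch(indexes):
    out, last = [], -1
    for i in indexes:
        out.append(i - (last + 1))
        last = i
    return out

def to_absolute_sketch(diffs):
    out, last = [], -1
    for d in diffs:
        last = last + 1 + d
        out.append(last)
    return out

assert to_absolute_sketch(from_absolute_sketch([1, 2, 3, 4, 5])) == [1, 2, 3, 4, 5]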
def test_getblocktxn_requests(self, node, test_node, version): def test_getblocktxn_response(compact_block, peer, expected_result): msg = msg_cmpctblock(compact_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert("getblocktxn" in peer.last_message) absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute( ) assert_equal(absolute_indexes, expected_result) def test_tip_after_message(node, peer, msg, tip): peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) msg_bt = msg_blocktxn() msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[1:]) test_tip_after_message(node, test_node, msg_bt, block.sha256) utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) # Now try interspersing the prefilled transactions comp_block.initialize_from_block( block, prefill_list=[0, 1, 5]) test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[2:5]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now try giving one transaction ahead of time. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) test_node.send_and_ping(msg_tx(ordered_txs[1])) assert(ordered_txs[1].hash in node.getrawmempool()) test_node.send_and_ping(msg_tx(ordered_txs[1])) # Prefill 4 out of the 6 transactions, and verify that only the one # that was not in the mempool is requested. prefill_list = [0, 1, 2, 3, 4, 5] prefill_list.remove(block.vtx.index(ordered_txs[1])) expected_index = block.vtx.index(ordered_txs[-1]) prefill_list.remove(expected_index) comp_block.initialize_from_block(block, prefill_list=prefill_list) test_getblocktxn_response(comp_block, test_node, [expected_index]) msg_bt.block_transactions = BlockTransactions( block.sha256, [ordered_txs[5]]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now provide all transactions to the node before the block is # announced and verify reconstruction happens immediately. utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) for tx in block.vtx[1:]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) # Clear out last request. with mininode_lock: test_node.last_message.pop("getblocktxn", None) # Send compact block comp_block.initialize_from_block(block, prefill_list=[0]) test_tip_after_message( node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction assert("getblocktxn" not in test_node.last_message) # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. 
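# Pure-Python restatement of the request logic exercised above: a
# receiver asks only for indexes that were neither prefilled in the
# compact block nor already satisfied from its mempool (an illustrative
# sketch, not the node's actual implementation).
def indexes_to_request_sketch(num_txs, prefilled, in_mempool):
    return [i for i in range(num_txs)
            if i not in prefilled and i not in in_mempool]

# Matches the case above: 6 transactions, prefill [0, 1, 5], nothing in
# the mempool yet, so getblocktxn asks for [2, 3, 4].
assert indexes_to_request_sketch(6, {0, 1, 5}, set()) == [2, 3, 4]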
def test_incorrect_blocktxn_response(self, node, test_node, version): if (len(self.utxos) == 0): self.make_utxos() utxo = self.utxos.pop(0) block, ordered_txs = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [ordered_txs[-1].sha256, 0, ordered_txs[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in ordered_txs[1:6]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in ordered_txs[1:6]: assert(tx.hash in mempool) # Send compact block comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0]) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indices = [] with mininode_lock: assert("getblocktxn" in test_node.last_message) absolute_indices = test_node.last_message["getblocktxn"].block_txn_request.to_absolute( ) expected_indices = [] for i in [6, 7, 8, 9, 10]: expected_indices.append(block.vtx.index(ordered_txs[i])) assert_equal(absolute_indices, sorted(expected_indices)) # Now give an incorrect response. # Note that it's possible for bitcoind to be smart enough to know we're # lying, since it could check to see if the shortid matches what we're # sending, and eg disconnect us for misbehavior. If that behavior # change were made, we could just modify this test by having a # different peer provide the block further down, so that we're still # verifying that the block isn't marked bad permanently. This is good # enough for now. msg = msg_blocktxn() msg.block_transactions = BlockTransactions( block.sha256, [ordered_txs[5]] + ordered_txs[7:]) test_node.send_and_ping(msg) # Tip should not have updated assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock) assert_equal(len(test_node.last_message["getdata"].inv), 1) assert(test_node.last_message["getdata"].inv[0].type == 2) assert_equal( test_node.last_message["getdata"].inv[0].hash, block.sha256) # Deliver the block test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def test_getblocktxn_handler(self, node, test_node, version): # bitcoind will not send blocktxn responses for blocks whose height is # more than 10 blocks deep. 
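# Sketch of the depth rule this handler test probes, as described in the
# comment above (the constant name mirrors the test's own; this is an
# illustration, not node code): blocktxn responses are only served near
# the tip, and older requests fall back to a full block.
def serves_blocktxn_sketch(request_height, tip_height, max_depth=10):
    return request_height >= tip_height - max_depth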
MAX_GETBLOCKTXN_DEPTH = 10 chain_height = node.getblockcount() current_height = chain_height while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): block_hash = node.getblockhash(current_height) block = FromHex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute( sorted(random.sample(range(len(block.vtx)), num_to_request))) test_node.send_message(msg) wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock) [tx.calc_sha256() for tx in block.vtx] with mininode_lock: assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int( block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop( 0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) test_node.last_message.pop("blocktxn", None) current_height -= 1 # Next request should send a full block response, as we're past the # allowed depth for a blocktxn response. block_hash = node.getblockhash(current_height) msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), [0]) with mininode_lock: test_node.last_message.pop("block", None) test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( test_node.last_message["block"].block.sha256, int(block_hash, 16)) assert "blocktxn" not in test_node.last_message def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] for i in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() node.generate(1) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) test_node.clear_block_announcement() with mininode_lock: test_node.last_message.pop("block", None) test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock) with mininode_lock: test_node.last_message["block"].block.calc_sha256() assert_equal( test_node.last_message["block"].block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. cur_height = node.getblockcount() hashPrevBlock = int(node.getblockhash(cur_height - 5), 16) block = self.build_block_on_tip(node) block.hashPrevBlock = hashPrevBlock block.solve() comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) tips = node.getchaintips() found = False for x in tips: if x["hash"] == block.hash: assert_equal(x["status"], "headers-only") found = True break assert(found) # Requesting this block via getblocktxn should silently fail # (to avoid fingerprinting attacks). 
msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: test_node.last_message.pop("blocktxn", None) test_node.send_and_ping(msg) with mininode_lock: assert "blocktxn" not in test_node.last_message def test_end_to_end_block_relay(self, node, listeners): utxo = self.utxos.pop(0) block, _ = self.build_block_with_transactions(node, utxo, 10) [l.clear_block_announcement() for l in listeners] node.submitblock(ToHex(block)) for l in listeners: wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock) with mininode_lock: for l in listeners: assert "cmpctblock" in l.last_message l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256( ) assert_equal( l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) # Test that we don't get disconnected if we relay a compact block with valid header, # but invalid transactions. def test_invalid_tx_in_compactblock(self, node, test_node): assert(len(self.utxos)) utxo = self.utxos[0] block, ordered_txs = self.build_block_with_transactions(node, utxo, 5) block.vtx.remove(ordered_txs[3]) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Now send the compact block with all transactions prefilled, and # verify that we don't get disconnected. comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4]) msg = msg_cmpctblock(comp_block.to_p2p()) test_node.send_and_ping(msg) # Check that the tip didn't advance assert(int(node.getbestblockhash(), 16) != block.sha256) test_node.sync_with_ping() # Helper for enabling cb announcements # Send the sendcmpct request and sync headers def request_cb_announcements(self, peer, node, version=1): tip = node.getbestblockhash() peer.get_headers(locator=[int(tip, 16)], hashstop=0) msg = msg_sendcmpct() msg.version = version msg.announce = True peer.send_and_ping(msg) def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer): assert(len(self.utxos)) def announce_cmpct_block(node, peer): utxo = self.utxos.pop(0) block, _ = self.build_block_with_transactions(node, utxo, 5) cmpct_block = HeaderAndShortIDs() cmpct_block.initialize_from_block(block) msg = msg_cmpctblock(cmpct_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert "getblocktxn" in peer.last_message return block, cmpct_block block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.sha256) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now test that delivering an invalid compact block won't break relay block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() # TODO: modify txhash in a way that doesn't impact txid.
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) # Because txhash isn't modified, we end up reconstructing the same block # assert(int(node.getbestblockhash(), 16) != block.sha256) msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = block.vtx[1:] stalling_peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def run_test(self): # Setup the p2p connections and start up the network thread. self.test_node = self.nodes[0].add_p2p_connection(TestNode()) self.ex_softfork_node = self.nodes[1].add_p2p_connection( TestNode(), services=NODE_NETWORK) self.old_node = self.nodes[1].add_p2p_connection( TestNode(), services=NODE_NETWORK) network_thread_start() self.test_node.wait_for_verack() # We will need UTXOs to construct transactions in later tests. self.make_utxos() self.log.info("Running tests:") self.log.info("\tTesting SENDCMPCT p2p message... ") self.test_sendcmpct(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_sendcmpct( self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting compactblock construction...") self.test_compactblock_construction(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblock_construction( self.nodes[1], self.ex_softfork_node) sync_blocks(self.nodes) self.log.info("\tTesting compactblock requests... ") self.test_compactblock_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_compactblock_requests( self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) self.log.info("\tTesting getblocktxn requests...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_requests(self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) self.log.info("\tTesting getblocktxn handler...") self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_handler(self.nodes[1], self.ex_softfork_node, 2) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) sync_blocks(self.nodes) self.log.info( "\tTesting compactblock requests/announcements not at chain tip...") self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblocks_not_at_tip( self.nodes[1], self.ex_softfork_node) self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_incorrect_blocktxn_response( self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) # End-to-end block relay tests self.log.info("\tTesting end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0]) self.request_cb_announcements(self.old_node, self.nodes[1]) self.request_cb_announcements( self.ex_softfork_node, self.nodes[1], version=2) self.test_end_to_end_block_relay( self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node]) self.test_end_to_end_block_relay( self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node]) self.log.info("\tTesting handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node) self.test_invalid_tx_in_compactblock( self.nodes[1], self.ex_softfork_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node) 
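# Sketch of the receiver-side matching that the reconstruction test
# below relies on: compute the short ID of every mempool txid with the
# block's keys and see which slots remain missing (a hypothetical helper
# built on the calculate_shortid import above, not node code).
def match_shortids_sketch(shortids, mempool_txids, k0, k1):
    by_shortid = {calculate_shortid(k0, k1, txid): txid
                  for txid in mempool_txids}
    found = {i: by_shortid[sid]
             for i, sid in enumerate(shortids) if sid in by_shortid}
    missing = [i for i, sid in enumerate(shortids)
               if sid not in by_shortid]
    return found, missing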
self.log.info( "\tTesting reconstructing compact blocks from all peers...") self.test_compactblock_reconstruction_multiple_peers( self.nodes[1], self.ex_softfork_node, self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() if __name__ == '__main__': CompactBlocksTest().main() diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index 6c1db2fe0..c8662a70c 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -1,113 +1,109 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -# +"""Test processing of feefilter messages.""" from decimal import Decimal import time from test_framework.messages import msg_feefilter from test_framework.mininode import ( mininode_lock, network_thread_start, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import sync_blocks, sync_mempools -''' -FeeFilterTest -- test processing of feefilter messages -''' - def hashToHex(hash): return format(hash, '064x') # Wait up to 60 secs to see if the testnode has received all the expected invs def allInvsMatch(invsExpected, testnode): for x in range(60): with mininode_lock: if (sorted(invsExpected) == sorted(testnode.txinvs)): return True time.sleep(1) return False class TestNode(P2PInterface): def __init__(self): super().__init__() self.txinvs = [] def on_inv(self, message): for i in message.inv: if (i.type == 1): self.txinvs.append(hashToHex(i.hash)) def clear_invs(self): with mininode_lock: self.txinvs = [] class FeeFilterTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def run_test(self): node1 = self.nodes[1] node0 = self.nodes[0] # Get out of IBD node1.generate(1) sync_blocks(self.nodes) # Setup the p2p connections and start up the network thread. self.nodes[0].add_p2p_connection(TestNode()) network_thread_start() self.nodes[0].p2p.wait_for_verack() # Test that invs are received for all txs at feerate of 20 sat/byte node1.settxfee(Decimal("0.00020000")) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert(allInvsMatch(txids, self.nodes[0].p2p)) self.nodes[0].p2p.clear_invs() # Set a filter of 15 sat/byte self.nodes[0].p2p.send_and_ping(msg_feefilter(15000)) # Test that txs are still being received (paying 20 sat/byte) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert(allInvsMatch(txids, self.nodes[0].p2p)) self.nodes[0].p2p.clear_invs() # Change tx fee rate to 10 sat/byte and test they are no longer # received node1.settxfee(Decimal("0.00010000")) [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] sync_mempools(self.nodes) # must be sure node 0 has received all txs # Send one transaction from node0 that should be received, so that we # we can sync the test on receipt (if node1's txs were relayed, they'd # be received by the time this node0 tx is received). This is # unfortunately reliant on the current relay behavior where we batch up # to 35 entries in an inv, which means that when this next transaction # is eligible for relay, the prior transactions from node1 are eligible # as well. 
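# Sketch of the unit conversion behind the constants used below
# (illustrative only): settxfee takes an amount in BCH per kB, while the
# feefilter message carries satoshis per kB, so 0.00020000 BCH/kB is
# 20000 sat/kB, i.e. 20 sat/byte.
from decimal import Decimal

SATOSHIS_PER_COIN = 100000000

def bch_per_kb_to_sat_per_byte_sketch(rate_bch_per_kb):
    return Decimal(rate_bch_per_kb) * SATOSHIS_PER_COIN / 1000

assert bch_per_kb_to_sat_per_byte_sketch(Decimal("0.00020000")) == 20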
node0.settxfee(Decimal("0.00020000")) txids = [node0.sendtoaddress(node0.getnewaddress(), 1)] assert(allInvsMatch(txids, self.nodes[0].p2p)) self.nodes[0].p2p.clear_invs() # Remove fee filter and check that txs are received again self.nodes[0].p2p.send_and_ping(msg_feefilter(0)) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert(allInvsMatch(txids, self.nodes[0].p2p)) self.nodes[0].p2p.clear_invs() if __name__ == '__main__': FeeFilterTest().main() diff --git a/test/functional/p2p_invalid_block.py b/test/functional/p2p_invalid_block.py index f64179a4a..2076f90dc 100755 --- a/test/functional/p2p_invalid_block.py +++ b/test/functional/p2p_invalid_block.py @@ -1,150 +1,148 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test node responses to invalid blocks. + +In this test we connect to one node over p2p, and test block requests: +1) Valid blocks should be requested and become chain tip. +2) Invalid block with duplicated transaction should be re-requested. +3) Invalid block with bad coinbase value should be rejected and not +re-requested. +""" import copy import time from test_framework.blocktools import ( create_block, create_coinbase, create_transaction, ) from test_framework.comptool import RejectResult, TestInstance, TestManager from test_framework.messages import COIN from test_framework.mininode import network_thread_start from test_framework.test_framework import ComparisonTestFramework from test_framework.util import assert_equal -''' -In this test we connect to one node over p2p, and test block requests: -1) Valid blocks should be requested and become chain tip. -2) Invalid block with duplicated transaction should be re-requested. -3) Invalid block with bad coinbase value should be rejected and not -re-requested. -''' - # Use the ComparisonTestFramework with 1 node: only use --testbinary. - - class InvalidBlockRequestTest(ComparisonTestFramework): ''' Can either run this test as 1 node with expected answers, or two and compare them. Change the "outcome" variable from each TestInstance object to only do the comparison. ''' def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def run_test(self): test = TestManager(self, self.options.tmpdir) test.add_all_connections(self.nodes) self.tip = None self.block_time = None self.extra_args = [["-whitelist=127.0.0.1"]] network_thread_start() test.run() def get_tests(self): if self.tip is None: self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time()) + 1 ''' Create a new block with an anyone-can-spend coinbase ''' height = 1 block = create_block( self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block.solve() self.tip = block.sha256 height += 1 yield TestInstance([[block, True]]) block1 = block ''' Now we need that block to mature so we can spend the coinbase. ''' test = TestInstance(sync_every_block=False) for i in range(100): block = create_block( self.tip, create_coinbase(height), self.block_time) block.solve() self.tip = block.sha256 self.block_time += 1 test.blocks_and_transactions.append([block, True]) height += 1 yield test assert(block.sha256 == int(self.nodes[0].getbestblockhash(), 16)) ''' Now we use merkle-root malleability to generate an invalid block with same blockheader. 
Manufacture a block with 3 transactions (coinbase, spend of prior coinbase, spend of that spend). Duplicate the 3rd transaction to leave merkle root and blockheader unchanged but invalidate the block. ''' block2 = create_block( self.tip, create_coinbase(height), self.block_time) self.block_time += 1 # b'0x51' is OP_TRUE tx1 = create_transaction(block1.vtx[0], 0, b'', 50 * COIN) tx2 = create_transaction(tx1, 0, b'\x51', 50 * COIN) block2.vtx.extend([tx1, tx2]) block2.vtx = [block2.vtx[0]] + \ sorted(block2.vtx[1:], key=lambda tx: tx.get_id()) block2.hashMerkleRoot = block2.calc_merkle_root() block2.rehash() block2.solve() orig_hash = block2.sha256 block2_orig = copy.deepcopy(block2) # Mutate block 2 block2.vtx.append(block2.vtx[2]) assert_equal(block2.hashMerkleRoot, block2.calc_merkle_root()) assert_equal(orig_hash, block2.rehash()) assert(block2_orig.vtx != block2.vtx) self.tip = block2.sha256 yield TestInstance([[block2, RejectResult(16, b'bad-txns-duplicate')]]) yield TestInstance([[block2_orig, True]]) height += 1 # Check transactions for duplicate inputs self.log.info("Test duplicate input block.") block2_orig.vtx[2].vin.append(block2_orig.vtx[2].vin[0]) block2.vtx = [block2.vtx[0]] + \ sorted(block2.vtx[1:], key=lambda tx: tx.get_id()) block2_orig.vtx[2].rehash() block2_orig.hashMerkleRoot = block2_orig.calc_merkle_root() block2_orig.rehash() block2_orig.solve() yield TestInstance([[block2_orig, RejectResult(16, b'bad-txns-inputs-duplicate')]]) ''' Make sure that a totally screwed up block is not valid. ''' block3 = create_block( self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block3.vtx[0].vout[0].nValue = 51 * COIN # Too high! block3.vtx[0].sha256 = None block3.vtx[0].calc_sha256() block3.hashMerkleRoot = block3.calc_merkle_root() block3.rehash() block3.solve() yield TestInstance([[block3, RejectResult(16, b'bad-cb-amount')]]) if __name__ == '__main__': InvalidBlockRequestTest().main() diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index 27722da36..8a9bb0d6f 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -1,86 +1,86 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test node responses to invalid transactions. + +In this test we connect to one node over p2p, and test tx requests. +""" import time from test_framework.blocktools import ( create_block, create_coinbase, create_transaction, ) from test_framework.comptool import RejectResult, TestInstance, TestManager from test_framework.messages import COIN from test_framework.mininode import network_thread_start from test_framework.test_framework import ComparisonTestFramework -''' -In this test we connect to one node over p2p, and test tx requests. -''' - # Use the ComparisonTestFramework with 1 node: only use --testbinary. class InvalidTxRequestTest(ComparisonTestFramework): ''' Can either run this test as 1 node with expected answers, or two and compare them. Change the "outcome" variable from each TestInstance object to only do the comparison. 
''' def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def run_test(self): test = TestManager(self, self.options.tmpdir) test.add_all_connections(self.nodes) self.tip = None self.block_time = None network_thread_start() test.run() def get_tests(self): if self.tip is None: self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time()) + 1 ''' Create a new block with an anyone-can-spend coinbase ''' height = 1 block = create_block( self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block.solve() # Save the coinbase for later self.block1 = block self.tip = block.sha256 height += 1 yield TestInstance([[block, True]]) ''' Now we need that block to mature so we can spend the coinbase. ''' test = TestInstance(sync_every_block=False) for i in range(100): block = create_block( self.tip, create_coinbase(height), self.block_time) block.solve() self.tip = block.sha256 self.block_time += 1 test.blocks_and_transactions.append([block, True]) height += 1 yield test # b'\x64' is OP_NOTIF # Transaction will be rejected with code 16 (REJECT_INVALID) tx1 = create_transaction( self.block1.vtx[0], 0, b'\x64', 50 * COIN - 12000) yield TestInstance([[tx1, RejectResult(16, b'mandatory-script-verify-flag-failed')]]) # TODO: test further transactions... if __name__ == '__main__': InvalidTxRequestTest().main() diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index bba80b9f4..69257e762 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -1,170 +1,171 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test message sending before handshake completion. A node should never send anything other than VERSION/VERACK/REJECT until it's received a VERACK. This test connects to a node and sends it a few messages, trying to intice it -into sending us something it shouldn't.""" +into sending us something it shouldn't. 
+""" import time from test_framework.mininode import ( mininode_lock, network_thread_join, network_thread_start, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import wait_until banscore = 10 class CLazyNode(P2PInterface): def __init__(self): super().__init__() self.unexpected_msg = False self.ever_connected = False def bad_message(self, message): self.unexpected_msg = True self.log.info( "should not have received message: {}".format(message.command)) def on_open(self): self.ever_connected = True def on_version(self, message): self.bad_message(message) def on_verack(self, message): self.bad_message(message) def on_reject(self, message): self.bad_message(message) def on_inv(self, message): self.bad_message(message) def on_addr(self, message): self.bad_message(message) def on_getdata(self, message): self.bad_message(message) def on_getblocks(self, message): self.bad_message(message) def on_tx(self, message): self.bad_message(message) def on_block(self, message): self.bad_message(message) def on_getaddr(self, message): self.bad_message(message) def on_headers(self, message): self.bad_message(message) def on_getheaders(self, message): self.bad_message(message) def on_ping(self, message): self.bad_message(message) def on_mempool(self, message): self.bad_message(message) def on_pong(self, message): self.bad_message(message) def on_feefilter(self, message): self.bad_message(message) def on_sendheaders(self, message): self.bad_message(message) def on_sendcmpct(self, message): self.bad_message(message) def on_cmpctblock(self, message): self.bad_message(message) def on_getblocktxn(self, message): self.bad_message(message) def on_blocktxn(self, message): self.bad_message(message) # Node that never sends a version. We'll use this to send a bunch of messages # anyway, and eventually get disconnected. class CNodeNoVersionBan(CLazyNode): # send a bunch of veracks without sending a message. This should get us disconnected. # NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes def on_open(self): super().on_open() for i in range(banscore): self.send_message(msg_verack()) def on_reject(self, message): pass # Node that never sends a version. This one just sits idle and hopes to receive # any message (it shouldn't!) class CNodeNoVersionIdle(CLazyNode): def __init__(self): super().__init__() # Node that sends a version but not a verack. class CNodeNoVerackIdle(CLazyNode): def __init__(self): self.version_received = False super().__init__() def on_reject(self, message): pass def on_verack(self, message): pass # When version is received, don't reply with a verack. Instead, see if the # node will give us a message that it shouldn't. This is not an exhaustive # list! 
def on_version(self, message): self.version_received = True self.send_message(msg_ping()) self.send_message(msg_getaddr()) class P2PLeakTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [['-banscore=' + str(banscore)]] def run_test(self): no_version_bannode = self.nodes[0].add_p2p_connection( CNodeNoVersionBan(), send_version=False) no_version_idlenode = self.nodes[0].add_p2p_connection( CNodeNoVersionIdle(), send_version=False) no_verack_idlenode = self.nodes[0].add_p2p_connection( CNodeNoVerackIdle()) network_thread_start() wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock) wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock) wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock) # Mine a block and make sure that it's not sent to the connected nodes self.nodes[0].generate(1) # Give the node enough time to possibly leak out a message time.sleep(5) # This node should have been banned assert no_version_bannode.state != "connected" self.nodes[0].disconnect_p2ps() # Wait until all connections are closed and the network thread has terminated wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0) network_thread_join() # Make sure no unexpected messages came in assert(no_version_bannode.unexpected_msg == False) assert(no_version_idlenode.unexpected_msg == False) assert(no_verack_idlenode.unexpected_msg == False) if __name__ == '__main__': P2PLeakTest().main() diff --git a/test/functional/p2p_mempool.py b/test/functional/p2p_mempool.py index 2155e7ee6..6ae0317ed 100755 --- a/test/functional/p2p_mempool.py +++ b/test/functional/p2p_mempool.py @@ -1,33 +1,38 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test p2p mempool message. + +Test that nodes are disconnected if they send mempool messages when bloom +filters are not enabled. +""" from test_framework.messages import msg_mempool from test_framework.mininode import network_thread_start, P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class P2PMempoolTests(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.extra_args = [["-peerbloomfilters=0"]] def run_test(self): # Add a p2p connection self.nodes[0].add_p2p_connection(P2PInterface()) network_thread_start() self.nodes[0].p2p.wait_for_verack() # request mempool self.nodes[0].p2p.send_message(msg_mempool()) self.nodes[0].p2p.wait_for_disconnect() # mininode must be disconnected at this point assert_equal(len(self.nodes[0].getpeerinfo()), 0) if __name__ == '__main__': P2PMempoolTests().main() diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py index fa8cbee5a..3d2aabce8 100755 --- a/test/functional/p2p_timeouts.py +++ b/test/functional/p2p_timeouts.py @@ -1,80 +1,80 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -""" TimeoutsTest -- test various net timeouts (only in extended tests) +"""Test various net timeouts. 
- Create three bitcoind nodes: no_verack_node - we never send a verack in response to their version no_version_node - we never send a version (only a ping) no_send_node - we never send any P2P message. - Start all three nodes - Wait 1 second - Assert that we're connected - Send a ping to no_verack_node and no_version_node - Wait 30 seconds - Assert that we're still connected - Send a ping to no_verack_node and no_version_node - Wait 31 seconds - Assert that we're no longer connected (timeout to receive version/verack is 60 seconds) """ from time import sleep from test_framework.messages import msg_ping from test_framework.mininode import network_thread_start, P2PInterface from test_framework.test_framework import BitcoinTestFramework class TestNode(P2PInterface): def on_version(self, message): # Don't send a verack in response pass class TimeoutsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): # Setup the p2p connections and start up the network thread. no_verack_node = self.nodes[0].add_p2p_connection(TestNode()) no_version_node = self.nodes[0].add_p2p_connection( TestNode(), send_version=False) no_send_node = self.nodes[0].add_p2p_connection( TestNode(), send_version=False) network_thread_start() sleep(1) assert no_verack_node.connected assert no_version_node.connected assert no_send_node.connected no_verack_node.send_message(msg_ping()) no_version_node.send_message(msg_ping()) sleep(30) assert "version" in no_verack_node.last_message assert no_verack_node.connected assert no_version_node.connected assert no_send_node.connected no_verack_node.send_message(msg_ping()) no_version_node.send_message(msg_ping()) sleep(31) assert not no_verack_node.connected assert not no_version_node.connected assert not no_send_node.connected if __name__ == '__main__': TimeoutsTest().main() diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py index 1bb87ffb5..52ec2ad44 100755 --- a/test/functional/p2p_unrequested_blocks.py +++ b/test/functional/p2p_unrequested_blocks.py @@ -1,367 +1,365 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - -import time - -from test_framework.blocktools import ( - create_block, - create_coinbase, - create_transaction, -) -from test_framework.messages import ( - CBlockHeader, - CInv, - msg_block, - msg_headers, - msg_inv, -) -from test_framework.mininode import ( - mininode_lock, - network_thread_join, - network_thread_start, - P2PInterface, -) -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - assert_raises_rpc_error, - connect_nodes, - sync_blocks, -) - -''' -AcceptBlockTest -- test processing of unrequested blocks. +"""Test processing of unrequested blocks. Setup: two nodes, node0+node1, not connected to each other. Node1 will have nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks. We have one P2PInterface connection to node0 called test_node, and one to node1 called min_work_node. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance for node0, but node1 should skip processing due to nMinimumChainWork. Node1 is unused in tests 3-7: 3. 
Mine a block that forks from the genesis block, and deliver to test_node. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more or equal work to the tip. 4a,b. Send another two blocks that build on the forking block. Node0 should process the second block but be stuck on the shorter chain, because it's missing an intermediate block. 4c.Send 288 more blocks on the longer chain (the number of blocks ahead we currently store). Node0 should process all but the last block (too far ahead in height). 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that Node0 has the missing height 2 block and send a getdata. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. 8. Create a fork which is invalid at a height longer than the current chain (ie to which the node will try to reorg) but which has headers built on top of the invalid block. Check that we get disconnected if we send more headers on the chain the node now knows to be invalid. 9. Test Node1 is able to sync when connected to node0 (which should have sufficient work on its chain). -''' +""" + +import time + +from test_framework.blocktools import ( + create_block, + create_coinbase, + create_transaction, +) +from test_framework.messages import ( + CBlockHeader, + CInv, + msg_block, + msg_headers, + msg_inv, +) +from test_framework.mininode import ( + mininode_lock, + network_thread_join, + network_thread_start, + P2PInterface, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, + connect_nodes, + sync_blocks, +) class AcceptBlockTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 self.extra_args = [["-noparkdeepreorg"], ["-minimumchainwork=0x10"]] def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. # Node2 will be used for non-whitelisted peers to test the interaction # with nMinimumChainWork. self.setup_nodes() def run_test(self): # Setup the p2p connections and start up the network thread. # test_node connects to node0 (not whitelisted) test_node = self.nodes[0].add_p2p_connection(P2PInterface()) # min_work_node connects to node1 (whitelisted) min_work_node = self.nodes[1].add_p2p_connection(P2PInterface()) network_thread_start() # Test logic begins here test_node.wait_for_verack() min_work_node.wait_for_verack() # 1. Have nodes mine a block (leave IBD) [n.generate(1) for n in self.nodes] tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes] # 2. Send one block that builds on each tip. # This should be accepted by node0 blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): blocks_h2.append(create_block( tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) min_work_node.send_message(msg_block(blocks_h2[1])) for x in [test_node, min_work_node]: x.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 1) self.log.info( "First height 2 block accepted by node0; correctly rejected by node1") # 3. 
Send another block that builds on genesis. block_h1f = create_block( int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time) block_time += 1 block_h1f.solve() test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h1f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash) # 4. Send another two block that build on the fork. block_h2f = create_block( block_h1f.sha256, create_coinbase(2), block_time) block_time += 1 block_h2f.solve() test_node.send_message(msg_block(block_h2f)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h2f.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) # But this block should be accepted by node since it has equal work. self.nodes[0].getblock(block_h2f.hash) self.log.info("Second height 2 block accepted, but not reorg'ed to") # 4b. Now send another block that builds on the forking chain. block_h3 = create_block( block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1) block_h3.solve() test_node.send_message(msg_block(block_h3)) test_node.sync_with_ping() # Since the earlier block was not processed by node, the new block # can't be fully validated. tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_h3.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) self.nodes[0].getblock(block_h3.hash) # But this block should be accepted by node since it has more work. self.nodes[0].getblock(block_h3.hash) self.log.info("Unrequested more-work block accepted") # 4c. Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node (as long as its not missing any headers) tip = block_h3 all_blocks = [] for i in range(288): next_block = create_block( tip.sha256, create_coinbase(i + 4), tip.nTime+1) next_block.solve() all_blocks.append(next_block) tip = next_block # Now send the block at height 5 and check that it wasn't accepted (missing header) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash) assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash) # The block at height 5 should be accepted if we provide the missing header, though headers_message = msg_headers() headers_message.headers.append(CBlockHeader(all_blocks[0])) test_node.send_message(headers_message) test_node.send_message(msg_block(all_blocks[1])) test_node.sync_with_ping() self.nodes[0].getblock(all_blocks[1].hash) # Now send the blocks in all_blocks for i in range(288): test_node.send_message(msg_block(all_blocks[i])) test_node.sync_with_ping() # Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_rpc_error( -1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). 
# The node should have requested the blocks at some point, so # disconnect/reconnect first self.nodes[0].disconnect_p2ps() self.nodes[1].disconnect_p2ps() network_thread_join() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) network_thread_start() test_node.wait_for_verack() test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) self.log.info( "Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. # Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). with mininode_lock: # Clear state so we can check the getdata request test_node.last_message.pop("getdata", None) test_node.send_message(msg_inv([CInv(2, block_h3.sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_message["getdata"] # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, block_h1f.sha256) self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(block_h1f)) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) self.nodes[0].getblock(all_blocks[286].hash) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash) self.log.info( "Successfully reorged to longer chain from non-whitelisted peer") # 8. Create a chain which is invalid at a height longer than the # current chain, but which has more blocks on top of that block_289f = create_block( all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1) block_289f.solve() block_290f = create_block( block_289f.sha256, create_coinbase(290), block_289f.nTime+1) block_290f.solve() block_291 = create_block( block_290f.sha256, create_coinbase(291), block_290f.nTime+1) # block_291 spends a coinbase below maturity! 
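# Background sketch for why block_291 below is invalid: a coinbase
# output may only be spent once it has COINBASE_MATURITY (100)
# confirmations, and block_290f's coinbase would have just one here
# (an illustrative check with example heights, not consensus code).
COINBASE_MATURITY = 100

def coinbase_spendable_sketch(coinbase_height, spend_height):
    return spend_height - coinbase_height >= COINBASE_MATURITY

assert not coinbase_spendable_sketch(290, 291)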
block_291.vtx.append(create_transaction( block_290f.vtx[0], 0, b"42", 1)) block_291.hashMerkleRoot = block_291.calc_merkle_root() block_291.solve() block_292 = create_block( block_291.sha256, create_coinbase(292), block_291.nTime+1) block_292.solve() # Now send all the headers on the chain and enough blocks to trigger reorg headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_289f)) headers_message.headers.append(CBlockHeader(block_290f)) headers_message.headers.append(CBlockHeader(block_291)) headers_message.headers.append(CBlockHeader(block_292)) test_node.send_message(headers_message) test_node.sync_with_ping() tip_entry_found = False for x in self.nodes[0].getchaintips(): if x['hash'] == block_292.hash: assert_equal(x['status'], "headers-only") tip_entry_found = True assert(tip_entry_found) assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash) test_node.send_message(msg_block(block_289f)) test_node.send_message(msg_block(block_290f)) test_node.sync_with_ping() self.nodes[0].getblock(block_289f.hash) self.nodes[0].getblock(block_290f.hash) test_node.send_message(msg_block(block_291)) # At this point we've sent an obviously-bogus block, wait for full processing # without assuming whether we will be disconnected or not try: # Only wait a short while so the test doesn't take forever if we do get # disconnected test_node.sync_with_ping(timeout=1) except AssertionError: test_node.wait_for_disconnect() self.nodes[0].disconnect_p2ps() test_node = self.nodes[0].add_p2p_connection(P2PInterface()) network_thread_start() test_node.wait_for_verack() # We should have failed reorg and switched back to 290 (but have block 291) assert_equal(self.nodes[0].getblockcount(), 290) assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash) assert_equal(self.nodes[0].getblock( block_291.hash)["confirmations"], -1) # Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected block_293 = create_block( block_292.sha256, create_coinbase(293), block_292.nTime+1) block_293.solve() headers_message = msg_headers() headers_message.headers.append(CBlockHeader(block_293)) test_node.send_message(headers_message) test_node.wait_for_disconnect() # 9. Connect node1 to node0 and ensure it is able to sync connect_nodes(self.nodes[0], self.nodes[1]) sync_blocks([self.nodes[0], self.nodes[1]]) self.log.info("Successfully synced nodes 1 and 0") if __name__ == '__main__': AcceptBlockTest().main() diff --git a/test/functional/rpc_bind.py b/test/functional/rpc_bind.py index c782e71a0..5084f67d0 100755 --- a/test/functional/rpc_bind.py +++ b/test/functional/rpc_bind.py @@ -1,142 +1,141 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -# Test for -rpcbind, as well as -rpcallowip and -rpcconnect +"""Test running bitcoind with the -rpcbind and -rpcallowip options.""" from platform import uname import socket import sys from test_framework.netutil import addr_to_hex, all_interfaces, get_bind_addrs from test_framework.test_framework import BitcoinTestFramework, SkipTest from test_framework.util import ( assert_equal, assert_raises_rpc_error, get_rpc_proxy, rpc_port, rpc_url, ) class RPCBindTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.bind_to_localhost_only = False self.num_nodes = 1 def setup_network(self): self.add_nodes(self.num_nodes, None) def run_bind_test(self, allow_ips, connect_to, addresses, expected): ''' Start a node with requested rpcallowip and rpcbind parameters, then try to connect, and check if the set of bound addresses matches the expected set. ''' self.log.info("Bind test for {}".format(str(addresses))) expected = [(addr_to_hex(addr), port) for (addr, port) in expected] base_args = ['-disablewallet', '-nolisten'] if allow_ips: base_args += ['-rpcallowip=' + x for x in allow_ips] binds = ['-rpcbind=' + addr for addr in addresses] parts = connect_to.split(':') if len(parts) == 2: self.nodes[0].host = parts[0] self.nodes[0].rpc_port = parts[1] else: self.nodes[0].host = connect_to self.nodes[0].rpc_port = rpc_port(self.nodes[0].index) self.start_node(0, base_args + binds) pid = self.nodes[0].process.pid assert_equal(set(get_bind_addrs(pid)), set(expected)) self.stop_nodes() def run_allowip_test(self, allow_ips, rpchost, rpcport): ''' Start a node with rpcallowip, and request getnetworkinfo at a non-localhost IP. ''' self.log.info("Allow IP test for {}:{}".format(rpchost, rpcport)) base_args = ['-disablewallet', '-nolisten'] + \ ['-rpcallowip=' + x for x in allow_ips] self.nodes[0].host = None self.start_nodes([base_args]) # connect to node through non-loopback interface url = rpc_url(self.nodes[0].datadir, rpchost, rpcport) node = get_rpc_proxy(url, 0, coveragedir=self.options.coveragedir) node.getnetworkinfo() self.stop_nodes() def run_test(self): # due to OS-specific network stats queries, this test works only on Linux if not sys.platform.startswith('linux'): raise SkipTest("This test can only be run on Linux.") # WSL is currently not supported (refer to # https://reviews.bitcoinabc.org/T400 for details). # This condition should be removed once netstat support is provided by # Microsoft.
if "microsoft" in uname().version.lower(): raise SkipTest( "Running this test on WSL is currently not supported") # find the first non-loopback interface for testing non_loopback_ip = None for name, ip in all_interfaces(): if ip != '127.0.0.1': non_loopback_ip = ip break if non_loopback_ip is None: raise SkipTest( "This test requires at least one non-loopback IPv4 interface.") try: s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) s.connect(("::1", 1)) s.close except OSError: raise SkipTest("This test requires IPv6 support.") self.log.info("Using interface {} for testing".format(non_loopback_ip)) defaultport = rpc_port(0) # check default without rpcallowip (IPv4 and IPv6 localhost) self.run_bind_test(None, '127.0.0.1', [], [('127.0.0.1', defaultport), ('::1', defaultport)]) # check default with rpcallowip (IPv6 any) self.run_bind_test(['127.0.0.1'], '127.0.0.1', [], [('::0', defaultport)]) # check only IPv4 localhost (explicit) self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'], [('127.0.0.1', defaultport)]) # check only IPv4 localhost (explicit) with alternative port self.run_bind_test( ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'], [('127.0.0.1', 32171)]) # check only IPv4 localhost (explicit) with multiple alternative ports # on same host self.run_bind_test( ['127.0.0.1'], '127.0.0.1:32171', [ '127.0.0.1:32171', '127.0.0.1:32172'], [('127.0.0.1', 32171), ('127.0.0.1', 32172)]) # check only IPv6 localhost (explicit) self.run_bind_test(['[::1]'], '[::1]', ['[::1]'], [('::1', defaultport)]) # check both IPv4 and IPv6 localhost (explicit) self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'], [('127.0.0.1', defaultport), ('::1', defaultport)]) # check only non-loopback interface self.run_bind_test( [non_loopback_ip], non_loopback_ip, [non_loopback_ip], [(non_loopback_ip, defaultport)]) # Check that with invalid rpcallowip, we are denied self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport) assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport) if __name__ == '__main__': RPCBindTest().main() diff --git a/test/functional/rpc_decodescript.py b/test/functional/rpc_decodescript.py index 65240ef93..e1b94d717 100755 --- a/test/functional/rpc_decodescript.py +++ b/test/functional/rpc_decodescript.py @@ -1,216 +1,217 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test decoding scripts via decodescript RPC command.""" from test_framework.messages import CTransaction, FromHex, ToHex from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, bytes_to_hex_str, hex_str_to_bytes class DecodeScriptTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def decodescript_script_sig(self): signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' push_signature = '48' + signature public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' push_public_key = '21' + public_key # below are test cases for all of the standard transaction types # 1) P2PK scriptSig # the scriptSig of a public key scriptPubKey simply pushes a signature # onto the stack rpc_result = self.nodes[0].decodescript(push_signature) assert_equal(signature, rpc_result['asm']) # 2) P2PKH scriptSig rpc_result = self.nodes[0].decodescript( push_signature + push_public_key) assert_equal(signature + ' ' + public_key, rpc_result['asm']) # 3) multisig scriptSig # this also tests the leading portion of a P2SH multisig scriptSig # OP_0 rpc_result = self.nodes[0].decodescript( '00' + push_signature + push_signature) assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm']) # 4) P2SH scriptSig # an empty P2SH redeemScript is valid and makes for a very simple test case. # thus, such a spending scriptSig would just need to pass the outer redeemScript # hash test and leave true on the top of the stack. rpc_result = self.nodes[0].decodescript('5100') assert_equal('1 0', rpc_result['asm']) # 5) null data scriptSig - no such thing because null data scripts can not be spent. # thus, no test case for that standard transaction type is here. def decodescript_script_pub_key(self): public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' push_public_key = '21' + public_key public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777' push_public_key_hash = '14' + public_key_hash # below are test cases for all of the standard transaction types # 1) P2PK scriptPubKey # OP_CHECKSIG rpc_result = self.nodes[0].decodescript(push_public_key + 'ac') assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm']) # 2) P2PKH scriptPubKey # OP_DUP OP_HASH160 OP_EQUALVERIFY OP_CHECKSIG rpc_result = self.nodes[0].decodescript( '76a9' + push_public_key_hash + '88ac') assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm']) # 3) multisig scriptPubKey # OP_CHECKMULTISIG # just imagine that the pub keys used below are different. # for our purposes here it does not matter that they are the same even # though it is unrealistic. rpc_result = self.nodes[0].decodescript( '52' + push_public_key + push_public_key + push_public_key + '53ae') assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm']) # 4) P2SH scriptPubKey # OP_HASH160 OP_EQUAL. # push_public_key_hash here should actually be the hash of a redeem script. # but this works the same for purposes of this test. rpc_result = self.nodes[0].decodescript( 'a9' + push_public_key_hash + '87') assert_equal( 'OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm']) # 5) null data scriptPubKey # use a signature look-alike here to make sure that we do not decode random data as a signature. 
# this matters if/when signature sighash decoding comes along. # would want to make sure that no such decoding takes place in this # case. signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' # OP_RETURN rpc_result = self.nodes[0].decodescript('6a' + signature_imposter) assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm']) # 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here. # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY. # just imagine that the pub keys used below are different. # for our purposes here it does not matter that they are the same even though it is unrealistic. # # OP_IF # OP_CHECKSIGVERIFY # OP_ELSE # OP_CHECKLOCKTIMEVERIFY OP_DROP # OP_ENDIF # OP_CHECKSIG # # lock until block 500,000 rpc_result = self.nodes[0].decodescript( '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac') assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm']) def decoderawtransaction_asm_sighashtype(self): - """Tests decoding scripts via RPC command "decoderawtransaction". + """Test decoding scripts via RPC command "decoderawtransaction". This test is in with the "decodescript" tests because they are testing the same "asm" script decodes. """ # this test case uses a random plain vanilla mainnet transaction with a # single P2PKH input and output tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal( '304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm']) # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs. # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc # verify that we have not altered scriptPubKey decoding. 
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal( '8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid']) assert_equal( '0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm']) assert_equal( 'OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal( 'OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) txSave = FromHex(CTransaction(), tx) # make sure that a specifically crafted op_return value will not pass # all the IsDERSignature checks and then get decoded as a sighash type tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm']) # verify that we have not altered scriptPubKey processing even of a # specially crafted P2PKH pubkeyhash and P2SH redeem script hash that # is made to pass the der signature checks tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal( 'OP_DUP OP_HASH160 
3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal( 'OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) # some more full transaction tests of varying specific scriptSigs. used instead of # tests in decodescript_script_sig because the decodescript RPC is specifically # for working on scriptPubKeys (argh!). push_signature = bytes_to_hex_str( txSave.vin[0].scriptSig)[2:(0x48 * 2 + 4)] signature = push_signature[2:] der_signature = signature[:-2] signature_sighash_decoded = der_signature + '[ALL]' signature_2 = der_signature + '82' push_signature_2 = '48' + signature_2 signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]' # 1) P2PK scriptSig txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature) rpc_result = self.nodes[0].decoderawtransaction(ToHex(txSave)) assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # make sure that the sighash decodes come out correctly for a more # complex / lesser used case. txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(ToHex(txSave)) assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 2) multisig scriptSig txSave.vin[0].scriptSig = hex_str_to_bytes( '00' + push_signature + push_signature_2) rpc_result = self.nodes[0].decoderawtransaction(ToHex(txSave)) assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 3) test a scriptSig that contains more than push operations. # in fact, it contains an OP_RETURN with data specially crafted to # cause improper decode if the code does not catch it. txSave.vin[0].scriptSig = hex_str_to_bytes( '6a143011020701010101010101020601010101010101') rpc_result = self.nodes[0].decoderawtransaction(ToHex(txSave)) assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm']) def run_test(self): self.decodescript_script_sig() self.decodescript_script_pub_key() self.decoderawtransaction_asm_sighashtype() if __name__ == '__main__': DecodeScriptTest().main() diff --git a/test/functional/rpc_getchaintips.py b/test/functional/rpc_getchaintips.py index 01f75be14..9c4f09cd5 100755 --- a/test/functional/rpc_getchaintips.py +++ b/test/functional/rpc_getchaintips.py @@ -1,62 +1,65 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test the getchaintips RPC. -# Exercise the getchaintips API. We introduce a network split, work -# on chains of different lengths, and join the network together again. -# This gives us two tips, verify that it works. +- introduce a network split +- work on chains of different lengths +- join the network together again +- verify that getchaintips now returns two chain tips. 
+""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class GetChainTipsTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], [], []] def run_test(self): tips = self.nodes[0].getchaintips() assert_equal(len(tips), 1) assert_equal(tips[0]['branchlen'], 0) assert_equal(tips[0]['height'], 200) assert_equal(tips[0]['status'], 'active') # Split the network and build two chains of different lengths. self.split_network() self.nodes[0].generate(10) self.nodes[2].generate(20) self.sync_all([self.nodes[:2], self.nodes[2:]]) tips = self.nodes[1].getchaintips() assert_equal(len(tips), 1) shortTip = tips[0] assert_equal(shortTip['branchlen'], 0) assert_equal(shortTip['height'], 210) assert_equal(tips[0]['status'], 'active') tips = self.nodes[3].getchaintips() assert_equal(len(tips), 1) longTip = tips[0] assert_equal(longTip['branchlen'], 0) assert_equal(longTip['height'], 220) assert_equal(tips[0]['status'], 'active') # Join the network halves and check that we now have two tips # (at least at the nodes that previously had the short chain). self.join_network() tips = self.nodes[0].getchaintips() assert_equal(len(tips), 2) assert_equal(tips[0], longTip) assert_equal(tips[1]['branchlen'], 10) assert_equal(tips[1]['status'], 'valid-fork') tips[1]['branchlen'] = 0 tips[1]['status'] = 'active' assert_equal(tips[1], shortTip) if __name__ == '__main__': GetChainTipsTest().main() diff --git a/test/functional/rpc_invalidateblock.py b/test/functional/rpc_invalidateblock.py index 9183deb76..ebb2e3d64 100755 --- a/test/functional/rpc_invalidateblock.py +++ b/test/functional/rpc_invalidateblock.py @@ -1,76 +1,73 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -# -# Test InvalidateBlock code -# +"""Test the invalidateblock RPC.""" import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, connect_nodes_bi, sync_blocks class InvalidateTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [["-noparkdeepreorg"], [], []] def setup_network(self): self.setup_nodes() def run_test(self): self.log.info( "Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:") self.log.info("Mine 4 blocks on Node 0") self.nodes[0].generate(4) assert(self.nodes[0].getblockcount() == 4) besthash = self.nodes[0].getbestblockhash() self.log.info("Mine competing 6 blocks on Node 1") self.nodes[1].generate(6) assert(self.nodes[1].getblockcount() == 6) self.log.info("Connect nodes to force a reorg") connect_nodes_bi(self.nodes[0], self.nodes[1]) sync_blocks(self.nodes[0:2]) assert(self.nodes[0].getblockcount() == 6) badhash = self.nodes[1].getblockhash(2) self.log.info( "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain") self.nodes[0].invalidateblock(badhash) newheight = self.nodes[0].getblockcount() newhash = self.nodes[0].getbestblockhash() if (newheight != 4 or newhash != besthash): raise AssertionError( "Wrong tip for node0, hash {}, height {}".format(newhash, newheight)) self.log.info("\nMake sure we won't reorg to a lower work chain:") connect_nodes_bi(self.nodes[1], self.nodes[2]) self.log.info("Sync node 2 to node 1 so both have 6 blocks") sync_blocks(self.nodes[1:3]) assert(self.nodes[2].getblockcount() == 6) self.log.info("Invalidate block 5 on node 1 so its tip is now at 4") self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5)) assert(self.nodes[1].getblockcount() == 4) self.log.info("Invalidate block 3 on node 2, so its tip is now 2") self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3)) assert(self.nodes[2].getblockcount() == 2) self.log.info("...and then mine a block") self.nodes[2].generate(1) self.log.info("Verify all nodes are at the right height") time.sleep(5) assert_equal(self.nodes[2].getblockcount(), 3) assert_equal(self.nodes[0].getblockcount(), 4) node1height = self.nodes[1].getblockcount() if node1height < 4: raise AssertionError( "Node 1 reorged to a lower height: {}".format(node1height)) if __name__ == '__main__': InvalidateTest().main() diff --git a/test/functional/rpc_listtransactions.py b/test/functional/rpc_listtransactions.py index 548cdf3a7..ec3f46142 100755 --- a/test/functional/rpc_listtransactions.py +++ b/test/functional/rpc_listtransactions.py @@ -1,98 +1,97 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
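Nearly every assertion in the listtransactions test below goes through assert_array_result, which selects entries by one dict of key/value pairs and then requires a second dict to match on each selected entry. Its matching logic is roughly this (a simplified sketch of test_framework.util.assert_array_result):

def matching_entries(array, to_match, expected):
    # Keep entries whose fields all equal to_match, then require every
    # expected field to match on each of them.
    hits = [item for item in array
            if all(item.get(k) == v for k, v in to_match.items())]
    assert hits, "no entries matched {}".format(to_match)
    for item in hits:
        for k, v in expected.items():
            assert item.get(k) == v, "{}: got {}, expected {}".format(
                k, item.get(k), v)
    return hits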
- -# Exercise the listtransactions API +"""Test the listtransactions API.""" from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_array_result class ListTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def run_test(self): # Leave IBD self.nodes[0].generate(1) # Simple send, 0 to 1: txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid": txid}, {"category": "send", "account": "", "amount": Decimal("-0.1"), "confirmations": 0}) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid}, {"category": "receive", "account": "", "amount": Decimal("0.1"), "confirmations": 0}) # mine a block, confirmations should change: self.nodes[0].generate(1) self.sync_all() assert_array_result(self.nodes[0].listtransactions(), {"txid": txid}, {"category": "send", "account": "", "amount": Decimal("-0.1"), "confirmations": 1}) assert_array_result(self.nodes[1].listtransactions(), {"txid": txid}, {"category": "receive", "account": "", "amount": Decimal("0.1"), "confirmations": 1}) # send-to-self: txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid, "category": "send"}, {"amount": Decimal("-0.2")}) assert_array_result(self.nodes[0].listtransactions(), {"txid": txid, "category": "receive"}, {"amount": Decimal("0.2")}) # sendmany from node1: twice to self, twice to node2: send_to = {self.nodes[0].getnewaddress(): 0.11, self.nodes[1].getnewaddress(): 0.22, self.nodes[0].getaccountaddress("from1"): 0.33, self.nodes[1].getaccountaddress("toself"): 0.44} txid = self.nodes[1].sendmany("", send_to) self.sync_all() assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.11")}, {"txid": txid}) assert_array_result(self.nodes[0].listtransactions(), {"category": "receive", "amount": Decimal("0.11")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.22")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "receive", "amount": Decimal("0.22")}, {"txid": txid}) assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.33")}, {"txid": txid}) assert_array_result(self.nodes[0].listtransactions(), {"category": "receive", "amount": Decimal("0.33")}, {"txid": txid, "account": "from1"}) assert_array_result(self.nodes[1].listtransactions(), {"category": "send", "amount": Decimal("-0.44")}, {"txid": txid, "account": ""}) assert_array_result(self.nodes[1].listtransactions(), {"category": "receive", "amount": Decimal("0.44")}, {"txid": txid, "account": "toself"}) pubkey = self.nodes[1].getaddressinfo( self.nodes[1].getnewaddress())['pubkey'] multisig = self.nodes[1].createmultisig(1, [pubkey]) self.nodes[0].importaddress( multisig["redeemScript"], "watchonly", False, True) txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1) self.nodes[1].generate(1) self.sync_all() assert( len(self.nodes[0].listtransactions("watchonly", 100, 0, False)) == 0) assert_array_result( self.nodes[0].listtransactions("watchonly", 100, 0, True), {"category": "receive", "amount": Decimal("0.1")}, {"txid": txid, "account": "watchonly"}) if __name__ == '__main__': ListTransactionsTest().main() diff --git a/test/functional/rpc_named_arguments.py b/test/functional/rpc_named_arguments.py 
index 502733a92..2ac338399 100755 --- a/test/functional/rpc_named_arguments.py +++ b/test/functional/rpc_named_arguments.py @@ -1,34 +1,35 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test using named arguments for RPCs.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class NamedArgumentTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 def run_test(self): node = self.nodes[0] h = node.help(command='getblockchaininfo') assert(h.startswith('getblockchaininfo\n')) assert_raises_rpc_error(-8, 'Unknown named parameter', node.help, random='getblockchaininfo') h = node.getblockhash(height=0) node.getblock(blockhash=h) assert_equal(node.echo(), []) assert_equal(node.echo(arg0=0, arg9=9), [0] + [None] * 8 + [9]) assert_equal(node.echo(arg1=1), [None, 1]) assert_equal(node.echo(arg9=None), [None] * 10) assert_equal(node.echo(arg0=0, arg3=3, arg9=9), [0] + [None] * 2 + [3] + [None] * 5 + [9]) if __name__ == '__main__': NamedArgumentTest().main() diff --git a/test/functional/rpc_preciousblock.py b/test/functional/rpc_preciousblock.py index c3277bbb3..32acd1163 100755 --- a/test/functional/rpc_preciousblock.py +++ b/test/functional/rpc_preciousblock.py @@ -1,127 +1,124 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# -# Test PreciousBlock code -# +"""Test the preciousblock RPC.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes_bi, sync_blocks, ) def unidirectional_node_sync_via_rpc(node_src, node_dest): blocks_to_copy = [] blockhash = node_src.getbestblockhash() while True: try: assert(len(node_dest.getblock(blockhash, False)) > 0) break except: blocks_to_copy.append(blockhash) blockhash = node_src.getblockheader( blockhash, True)['previousblockhash'] blocks_to_copy.reverse() for blockhash in blocks_to_copy: blockdata = node_src.getblock(blockhash, False) assert(node_dest.submitblock(blockdata) in (None, 'inconclusive')) def node_sync_via_rpc(nodes): for node_src in nodes: for node_dest in nodes: if node_src is node_dest: continue unidirectional_node_sync_via_rpc(node_src, node_dest) class PreciousTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], ["-noparkdeepreorg"]] def setup_network(self): self.setup_nodes() def run_test(self): self.log.info( "Ensure submitblock can in principle reorg to a competing chain") self.nodes[0].generate(1) assert_equal(self.nodes[0].getblockcount(), 1) hashZ = self.nodes[1].generate(2)[-1] assert_equal(self.nodes[1].getblockcount(), 2) node_sync_via_rpc(self.nodes[0:3]) assert_equal(self.nodes[0].getbestblockhash(), hashZ) self.log.info("Mine blocks A-B-C on Node 0") hashC = self.nodes[0].generate(3)[-1] assert_equal(self.nodes[0].getblockcount(), 5) self.log.info("Mine competing blocks E-F-G on Node 1") hashG = self.nodes[1].generate(3)[-1] assert_equal(self.nodes[1].getblockcount(), 5) assert(hashC != hashG) self.log.info("Connect nodes and check no reorg occurs") # Submit competing blocks via RPC so any reorg should occur before we 
# proceed (no way to wait on inaction for p2p sync) node_sync_via_rpc(self.nodes[0:2]) connect_nodes_bi(self.nodes[0], self.nodes[1]) assert_equal(self.nodes[0].getbestblockhash(), hashC) assert_equal(self.nodes[1].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block G") self.nodes[0].preciousblock(hashG) assert_equal(self.nodes[0].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block C again") self.nodes[0].preciousblock(hashC) assert_equal(self.nodes[0].getbestblockhash(), hashC) self.log.info("Make Node1 prefer block C") self.nodes[1].preciousblock(hashC) # wait because node 1 may not have downloaded hashC sync_blocks(self.nodes[0:2]) assert_equal(self.nodes[1].getbestblockhash(), hashC) self.log.info("Make Node1 prefer block G again") self.nodes[1].preciousblock(hashG) assert_equal(self.nodes[1].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block G again") self.nodes[0].preciousblock(hashG) assert_equal(self.nodes[0].getbestblockhash(), hashG) self.log.info("Make Node1 prefer block C again") self.nodes[1].preciousblock(hashC) assert_equal(self.nodes[1].getbestblockhash(), hashC) self.log.info( "Mine another block (E-F-G-)H on Node 0 and reorg Node 1") self.nodes[0].generate(1) assert_equal(self.nodes[0].getblockcount(), 6) sync_blocks(self.nodes[0:2]) hashH = self.nodes[0].getbestblockhash() assert_equal(self.nodes[1].getbestblockhash(), hashH) self.log.info("Node1 should not be able to prefer block C anymore") self.nodes[1].preciousblock(hashC) assert_equal(self.nodes[1].getbestblockhash(), hashH) self.log.info("Mine competing blocks I-J-K-L on Node 2") self.nodes[2].generate(4) assert_equal(self.nodes[2].getblockcount(), 6) hashL = self.nodes[2].getbestblockhash() self.log.info("Connect nodes and check no reorg occurs") node_sync_via_rpc(self.nodes[1:3]) connect_nodes_bi(self.nodes[1], self.nodes[2]) connect_nodes_bi(self.nodes[0], self.nodes[2]) assert_equal(self.nodes[0].getbestblockhash(), hashH) assert_equal(self.nodes[1].getbestblockhash(), hashH) assert_equal(self.nodes[2].getbestblockhash(), hashL) self.log.info("Make Node1 prefer block L") self.nodes[1].preciousblock(hashL) assert_equal(self.nodes[1].getbestblockhash(), hashL) self.log.info("Make Node2 prefer block H") self.nodes[2].preciousblock(hashH) assert_equal(self.nodes[2].getbestblockhash(), hashH) if __name__ == '__main__': PreciousTest().main() diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py index 8ef73e31d..c66659d22 100755 --- a/test/functional/rpc_rawtransaction.py +++ b/test/functional/rpc_rawtransaction.py @@ -1,480 +1,481 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""rawtranscation RPCs QA test. - -# Tests the following RPCs: -# - createrawtransaction -# - signrawtransactionwithwallet -# - sendrawtransaction -# - decoderawtransaction -# - getrawtransaction +"""Test the rawtransaction RPCs.
+ +Test the following RPCs: + - createrawtransaction + - signrawtransactionwithwallet + - sendrawtransaction + - decoderawtransaction + - getrawtransaction """ + from decimal import Decimal from collections import OrderedDict from io import BytesIO from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_raw_tx from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes_bi, hex_str_to_bytes, bytes_to_hex_str, ) from test_framework.messages import ( CTransaction, ) class multidict(dict): """Dictionary that allows duplicate keys. Constructed with a list of (key, value) tuples. When dumped by the json module, will output invalid json with repeated keys, e.g.: >>> json.dumps(multidict([(1,2),(1,2)])) '{"1": 2, "1": 2}' Used to test calls to rpc methods with repeated keys in the json object.""" def __init__(self, x): dict.__init__(self, x) self.x = x def items(self): return self.x # Create one-input, one-output, no-fee transaction: class RawTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 def setup_network(self, split=False): super().setup_network() connect_nodes_bi(self.nodes[0], self.nodes[2]) def run_test(self): self.log.info( 'prepare some coins for multiple *rawtransaction commands') self.nodes[2].generate(1) self.sync_all() self.nodes[0].generate(101) self.sync_all() self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) self.sync_all() self.nodes[0].generate(5) self.sync_all() self.log.info( 'Test getrawtransaction on genesis block coinbase returns an error') block = self.nodes[0].getblock(self.nodes[0].getblockhash(0)) assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot']) self.log.info( 'Check parameter types and required parameters of createrawtransaction') # Test `createrawtransaction` required parameters assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction) assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, []) # Test `createrawtransaction` invalid extra parameters assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, 'foo') # Test `createrawtransaction` invalid `inputs` txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000' assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {}) assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {}) assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{}], {}) assert_raises_rpc_error(-8, "txid must be hexadecimal string", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {}) assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {}) assert_raises_rpc_error(-8, "Invalid parameter, vout must be a number", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {}) assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {}) assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range",
self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {}) # Test `createrawtransaction` invalid `outputs` address = self.nodes[0].getnewaddress() address2 = self.nodes[0].getnewaddress() assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo') # Should not throw for backwards compatibility self.nodes[0].createrawtransaction(inputs=[], outputs={}) self.nodes[0].createrawtransaction(inputs=[], outputs=[]) assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'}) assert_raises_rpc_error(-5, "Invalid Bitcoin address", self.nodes[0].createrawtransaction, [], {'foo': 0}) assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'}) assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1}) assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: {}".format( address), self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)])) assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: {}".format( address), self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}]) assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}]) assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']]) # Test `createrawtransaction` invalid `locktime` assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo') assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1) assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296) self.log.info( 'Check that createrawtransaction accepts an array and object as outputs') tx = CTransaction() # One output tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction( inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99})))) assert_equal(len(tx.vout), 1) assert_equal( bytes_to_hex_str(tx.serialize()), self.nodes[2].createrawtransaction( inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]), ) # Two outputs tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[ {'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)]))))) assert_equal(len(tx.vout), 2) assert_equal( bytes_to_hex_str(tx.serialize()), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[ {address: 99}, {address2: 99}]), ) # Two data outputs tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[ {'txid': txid, 'vout': 9}], outputs=multidict([('data', '99'), ('data', '99')]))))) assert_equal(len(tx.vout), 2) assert_equal( bytes_to_hex_str(tx.serialize()), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[ {'data': '99'}, {'data': '99'}]), ) # Multiple mixed outputs tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[ {'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), ('data', '99'), ('data', '99')]))))) assert_equal(len(tx.vout), 3) assert_equal( bytes_to_hex_str(tx.serialize()), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[ {address: 
99}, {'data': '99'}, {'data': '99'}]), ) self.log.info('sendrawtransaction with missing input') # won't exist inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1}] outputs = {self.nodes[0].getnewaddress(): 4.998} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) rawtx = pad_raw_tx(rawtx) rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx) # This will raise an exception since there are missing inputs assert_raises_rpc_error( -25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex']) ##################################### # getrawtransaction with block hash # ##################################### # make a tx by sending then generate 2 blocks; block1 has the tx in it tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1) block1, block2 = self.nodes[2].generate(2) self.sync_all() # We should be able to get the raw transaction by providing the correct block gottx = self.nodes[0].getrawtransaction(tx, True, block1) assert_equal(gottx['txid'], tx) assert_equal(gottx['in_active_chain'], True) # We should not have the 'in_active_chain' flag when we don't provide a block gottx = self.nodes[0].getrawtransaction(tx, True) assert_equal(gottx['txid'], tx) assert 'in_active_chain' not in gottx # We should not get the tx if we provide an unrelated block assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2) # An invalid block hash should raise the correct errors assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, True) assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal", self.nodes[0].getrawtransaction, tx, True, "foobar") assert_raises_rpc_error(-8, "parameter 3 must be of length 64", self.nodes[0].getrawtransaction, tx, True, "abcd1234") assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000") # Undo the blocks and check in_active_chain self.nodes[0].invalidateblock(block1) gottx = self.nodes[0].getrawtransaction( txid=tx, verbose=True, blockhash=block1) assert_equal(gottx['in_active_chain'], False) self.nodes[0].reconsiderblock(block1) assert_equal(self.nodes[0].getbestblockhash(), block2) # # RAW TX MULTISIG TESTS # # # 2of2 test addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) # Tests for createmultisig and addmultisigaddress assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"]) # createmultisig can only take public keys self.nodes[0].createmultisig( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr1])['address'] # use balance deltas instead of absolute values bal = self.nodes[2].getbalance() # send 1.2 BCH to msig adr txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) self.sync_all() self.nodes[0].generate(1) self.sync_all() # node2 has both keys of the 2of2 ms addr., tx should affect the # balance assert_equal(self.nodes[2].getbalance(), bal + Decimal('1.20000000')) # 2of3 test from different nodes bal = self.nodes[2].getbalance() addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr3 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) addr3Obj = self.nodes[2].getaddressinfo(addr3) mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address'] txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) decTx = self.nodes[0].gettransaction(txId) rawTx = self.nodes[0].decoderawtransaction(decTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() # THIS IS AN INCOMPLETE FEATURE # NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND # COUNT AT BALANCE CALCULATION # for now, assume the funds of a 2of3 multisig tx are not marked as # spendable assert_equal(self.nodes[2].getbalance(), bal) txDetails = self.nodes[0].gettransaction(txId, True) rawTx = self.nodes[0].decoderawtransaction(txDetails['hex']) vout = False for outpoint in rawTx['vout']: if outpoint['value'] == Decimal('2.20000000'): vout = outpoint break bal = self.nodes[0].getbalance() inputs = [{ "txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "amount": vout['value'], }] outputs = {self.nodes[0].getnewaddress(): 2.19} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet( rawTx, inputs) # node1 only has one key, can't comp. 
sign the tx assert_equal(rawTxPartialSigned['complete'], False) rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs) # node2 can sign the tx compl., own two of three keys assert_equal(rawTxSigned['complete'], True) self.nodes[2].sendrawtransaction(rawTxSigned['hex']) rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(self.nodes[0].getbalance(), bal + Decimal( '50.00000000') + Decimal('2.19000000')) # block reward + tx rawTxBlock = self.nodes[0].getblock(self.nodes[0].getbestblockhash()) # 2of2 test for combining transactions bal = self.nodes[2].getbalance() addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[1].getaddressinfo(addr1) addr2Obj = self.nodes[2].getaddressinfo(addr2) self.nodes[1].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address'] mSigObjValid = self.nodes[2].getaddressinfo(mSigObj) txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) decTx = self.nodes[0].gettransaction(txId) rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() # the funds of a 2of2 multisig tx should not be marked as spendable assert_equal(self.nodes[2].getbalance(), bal) txDetails = self.nodes[0].gettransaction(txId, True) rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex']) vout = False for outpoint in rawTx2['vout']: if outpoint['value'] == Decimal('2.20000000'): vout = outpoint break bal = self.nodes[0].getbalance() inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "redeemScript": mSigObjValid['hex'], "amount": vout['value']}] outputs = {self.nodes[0].getnewaddress(): 2.19} rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs) rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet( rawTx2, inputs) self.log.debug(rawTxPartialSigned1) # node1 only has one key, can't comp. sign the tx assert_equal(rawTxPartialSigned1['complete'], False) rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet( rawTx2, inputs) self.log.debug(rawTxPartialSigned2) # node2 only has one key, can't comp. sign the tx assert_equal(rawTxPartialSigned2['complete'], False) rawTxComb = self.nodes[2].combinerawtransaction( [rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']]) self.log.debug(rawTxComb) self.nodes[2].sendrawtransaction(rawTxComb) rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(self.nodes[0].getbalance( ), bal + Decimal('50.00000000') + Decimal('2.19000000')) # block reward + tx # getrawtransaction tests # 1. valid parameters - only supply txid txHash = rawTx["hash"] assert_equal( self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex']) # 2. valid parameters - supply txid and 0 for non-verbose assert_equal( self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex']) # 3. valid parameters - supply txid and False for non-verbose assert_equal(self.nodes[0].getrawtransaction( txHash, False), rawTxSigned['hex']) # 4. valid parameters - supply txid and 1 for verbose. # We only check the "hex" field of the output so we don't need to # update this test every time the output format changes. assert_equal(self.nodes[0].getrawtransaction( txHash, 1)["hex"], rawTxSigned['hex']) # 5.
valid parameters - supply txid and True for verbose assert_equal(self.nodes[0].getrawtransaction( txHash, True)["hex"], rawTxSigned['hex']) # 6. invalid parameters - supply txid and string "False" assert_raises_rpc_error( -1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "False") # 7. invalid parameters - supply txid and empty array assert_raises_rpc_error( -1, "not a boolean", self.nodes[0].getrawtransaction, txHash, []) # 8. invalid parameters - supply txid and empty dict assert_raises_rpc_error( -1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {}) # Sanity checks on verbose getrawtransaction output rawTxOutput = self.nodes[0].getrawtransaction(txHash, True) assert_equal(rawTxOutput["hex"], rawTxSigned["hex"]) assert_equal(rawTxOutput["txid"], txHash) assert_equal(rawTxOutput["hash"], txHash) assert_greater_than(rawTxOutput["size"], 300) assert_equal(rawTxOutput["version"], 0x02) assert_equal(rawTxOutput["locktime"], 0) assert_equal(len(rawTxOutput["vin"]), 1) assert_equal(len(rawTxOutput["vout"]), 1) assert_equal(rawTxOutput["blockhash"], rawTxBlock["hash"]) assert_equal(rawTxOutput["confirmations"], 3) assert_equal(rawTxOutput["time"], rawTxBlock["time"]) assert_equal(rawTxOutput["blocktime"], rawTxBlock["time"]) inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'sequence': 1000}] outputs = {self.nodes[0].getnewaddress(): 1} assert_raises_rpc_error( -8, 'Invalid parameter, missing vout key', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['vout'] = "1" assert_raises_rpc_error( -8, 'Invalid parameter, vout must be a number', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['vout'] = -1 assert_raises_rpc_error( -8, 'Invalid parameter, vout must be positive', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['vout'] = 1 rawtx = self.nodes[0].createrawtransaction(inputs, outputs) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 1000) # 9. invalid parameters - sequence number out of range inputs[0]['sequence'] = -1 assert_raises_rpc_error( -8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) # 10. invalid parameters - sequence number out of range inputs[0]['sequence'] = 4294967296 assert_raises_rpc_error( -8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) inputs[0]['sequence'] = 4294967294 rawtx = self.nodes[0].createrawtransaction(inputs, outputs) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 4294967294) if __name__ == '__main__': RawTransactionsTest().main() diff --git a/test/functional/rpc_signmessage.py b/test/functional/rpc_signmessage.py index 9239b84df..cb0990315 100755 --- a/test/functional/rpc_signmessage.py +++ b/test/functional/rpc_signmessage.py @@ -1,41 +1,42 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
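The signmessage tests that follow reduce to one sign/verify round-trip per case; in sketch form (against a framework node, message text arbitrary):

node = self.nodes[0]                 # assumes a running test node
message = 'This is just a test message'
address = node.getnewaddress()
signature = node.signmessage(address, message)
assert node.verifymessage(address, signature, message)
# Any change to the signed text must break verification.
assert not node.verifymessage(address, signature, message + '!')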
+"""Test RPC commands for signing and verifying messages.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class SignMessagesTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): message = 'This is just a test message' self.log.info('test signing with priv_key') priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N' address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB' expected_signature = 'IPDOIFcWd8LzOr70CXaal4+uG2ZZWcbHqutyGeO7AJ0MWbqq9C+u3KP9ScjtLzsIgY3st5n8XFQvgMZ0KrDQ9vg=' signature = self.nodes[0].signmessagewithprivkey(priv_key, message) assert_equal(expected_signature, signature) assert(self.nodes[0].verifymessage(address, signature, message)) self.log.info('test signing with an address with wallet') address = self.nodes[0].getnewaddress() signature = self.nodes[0].signmessage(address, message) assert(self.nodes[0].verifymessage(address, signature, message)) self.log.info('test verifying with another address should not work') other_address = self.nodes[0].getnewaddress() other_signature = self.nodes[0].signmessage(other_address, message) assert(not self.nodes[0].verifymessage( other_address, signature, message)) assert(not self.nodes[0].verifymessage( address, other_signature, message)) if __name__ == '__main__': SignMessagesTest().main() diff --git a/test/functional/rpc_txoutproof.py b/test/functional/rpc_txoutproof.py index e2bcf16f1..37b15c341 100755 --- a/test/functional/rpc_txoutproof.py +++ b/test/functional/rpc_txoutproof.py @@ -1,132 +1,129 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -# -# Test merkleblock fetch/validation -# +"""Test gettxoutproof and verifytxoutproof RPCs.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, connect_nodes, ) from test_framework.messages import CMerkleBlock, FromHex, ToHex class MerkleBlockTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True # Nodes 0/1 are "wallet" nodes, Nodes 2/3 are used for testing self.extra_args = [[], [], [], ["-txindex"]] def setup_network(self): self.setup_nodes() connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[0], self.nodes[2]) connect_nodes(self.nodes[0], self.nodes[3]) self.sync_all() def run_test(self): self.log.info("Mining blocks...") self.nodes[0].generate(105) self.sync_all() chain_height = self.nodes[1].getblockcount() assert_equal(chain_height, 105) assert_equal(self.nodes[1].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 0) node0utxos = self.nodes[0].listunspent(1) tx1 = self.nodes[0].createrawtransaction( [node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99}) txid1 = self.nodes[0].sendrawtransaction( self.nodes[0].signrawtransactionwithwallet(tx1)["hex"]) tx2 = self.nodes[0].createrawtransaction( [node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99}) txid2 = self.nodes[0].sendrawtransaction( self.nodes[0].signrawtransactionwithwallet(tx2)["hex"]) # This will raise an exception because the transaction is not yet in a block assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].gettxoutproof, [txid1]) self.nodes[0].generate(1) blockhash = self.nodes[0].getblockhash(chain_height + 1) self.sync_all() txlist = [] blocktxn = self.nodes[0].getblock(blockhash, True)["tx"] txlist.append(blocktxn[1]) txlist.append(blocktxn[2]) assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid1])), [txid1]) assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid1, txid2])), txlist) assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist) txin_spent = self.nodes[1].listunspent(1).pop() tx3 = self.nodes[1].createrawtransaction( [txin_spent], {self.nodes[0].getnewaddress(): 49.98}) txid3 = self.nodes[0].sendrawtransaction( self.nodes[1].signrawtransactionwithwallet(tx3)["hex"]) self.nodes[0].generate(1) self.sync_all() txid_spent = txin_spent["txid"] txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2 # We can't find the block from a fully-spent tx assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[2].gettxoutproof, [txid_spent]) # We can get the proof if we specify the block assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent]) # We can't get the proof if we specify a non-existent block assert_raises_rpc_error(-5, "Block not found", self.nodes[2].gettxoutproof, [ txid_spent], "00000000000000000000000000000000") # We can get the proof if the transaction is unspent assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent]) # We can get the proof if we provide a list of transactions and one of them is unspent. The ordering of the list should not matter. 
assert_equal(sorted(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist)) assert_equal(sorted(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid2, txid1]))), sorted(txlist)) # We can always get a proof if we have a -txindex assert_equal(self.nodes[2].verifytxoutproof( self.nodes[3].gettxoutproof([txid_spent])), [txid_spent]) # We can't get a proof if we specify transactions from different blocks assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block", self.nodes[2].gettxoutproof, [txid1, txid3]) # Now we'll try tweaking a proof. proof = self.nodes[3].gettxoutproof([txid1, txid2]) assert txid1 in self.nodes[0].verifytxoutproof(proof) assert txid2 in self.nodes[1].verifytxoutproof(proof) tweaked_proof = FromHex(CMerkleBlock(), proof) # Make sure that our serialization/deserialization is working assert txid1 in self.nodes[2].verifytxoutproof(ToHex(tweaked_proof)) # Check to see if we can go up the merkle tree and pass this off as a # single-transaction block tweaked_proof.txn.nTransactions = 1 tweaked_proof.txn.vHash = [tweaked_proof.header.hashMerkleRoot] tweaked_proof.txn.vBits = [True] + [False]*7 for n in self.nodes: assert not n.verifytxoutproof(ToHex(tweaked_proof)) # TODO: try more variants, eg transactions at different depths, and # verify that the proofs are invalid if __name__ == '__main__': MerkleBlockTest().main() diff --git a/test/functional/rpc_users.py b/test/functional/rpc_users.py index 882c96aec..c7a7737f9 100755 --- a/test/functional/rpc_users.py +++ b/test/functional/rpc_users.py @@ -1,161 +1,158 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
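Each credential check in the rpc_users test below is the same short HTTP exchange with a different Authorization header; factored out as a standalone sketch (host, port, and credentials are placeholders):

import base64
import http.client

def rpc_status(host, port, user, password):
    # POST a getbestblockhash call with HTTP Basic auth and return the
    # status code: 200 means the credentials were accepted, 401 rejected.
    auth = base64.b64encode('{}:{}'.format(user, password).encode('utf-8'))
    headers = {"Authorization": "Basic " + auth.decode('ascii')}
    conn = http.client.HTTPConnection(host, port)
    conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
    status = conn.getresponse().status
    conn.close()
    return status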
- -# -# Test multiple rpc user config option rpcauth -# +"""Test multiple RPC users.""" import http.client import os import urllib.parse from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, get_datadir_path, str_to_b64str, ) class HTTPBasicsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def setup_chain(self): super().setup_chain() # Append rpcauth to bitcoin.conf before initialization rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144" rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e" rpcuser = "rpcuser=rpcuser💻" rpcpassword = "rpcpassword=rpcpassword🔑" with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "bitcoin.conf"), 'a', encoding='utf8') as f: f.write(rpcauth + "\n") f.write(rpcauth2 + "\n") with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "bitcoin.conf"), 'a', encoding='utf8') as f: f.write(rpcuser + "\n") f.write(rpcpassword + "\n") def run_test(self): ################################################## # Check correctness of the rpcauth config option # ################################################## url = urllib.parse.urlparse(self.nodes[0].url) # Old authpair authpair = url.username + ':' + url.password # New authpair generated via share/rpcuser tool password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM=" # Second authpair with different username password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI=" authpairnew = "rt:" + password headers = {"Authorization": "Basic " + str_to_b64str(authpair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status, 200) conn.close() # Use new authpair to confirm both work headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status, 200) conn.close() # Wrong login name with rt's password authpairnew = "rtwrong:" + password headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status, 401) conn.close() # Wrong password for rt authpairnew = "rt:" + password + "wrong" headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status, 401) conn.close() # Correct for rt2 authpairnew = "rt2:" + password2 headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status, 200) conn.close() # Wrong password for rt2 authpairnew = "rt2:" + password2 + "wrong" headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status, 401) conn.close()
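# Illustrative aside, not part of the test: a minimal sketch of how an
# rpcauth line like the two appended to bitcoin.conf above can be produced.
# It assumes the scheme used by the share/rpcuser helper (a random 16-byte
# hex salt, then HMAC-SHA256 of the password keyed by that salt);
# generate_rpcauth is a hypothetical name, not a test framework function.
import hmac
from binascii import hexlify
from os import urandom

def generate_rpcauth(username, password):
    # 16 random bytes hex-encoded, matching the 32-char salts above
    salt = hexlify(urandom(16)).decode('utf-8')
    digest = hmac.new(salt.encode('utf-8'), password.encode('utf-8'),
                      'SHA256').hexdigest()
    return 'rpcauth={}:{}${}'.format(username, salt, digest)

# e.g. generate_rpcauth('rt', password) yields a line in the same
# salt$hash format the server verifies on each Basic-auth request.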
############################################################### # Check correctness of the rpcuser/rpcpassword config options # ############################################################### url = urllib.parse.urlparse(self.nodes[1].url) # rpcuser and rpcpassword authpair rpcuserauthpair = "rpcuser💻:rpcpassword🔑" headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status, 200) conn.close() # Wrong login name with rpcuser's password rpcuserauthpair = "rpcuserwrong:rpcpassword" headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status, 401) conn.close() # Wrong password for rpcuser rpcuserauthpair = "rpcuser:rpcpasswordwrong" headers = {"Authorization": "Basic " + str_to_b64str(rpcuserauthpair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status, 401) conn.close() if __name__ == '__main__': HTTPBasicsTest().main() diff --git a/test/functional/wallet_import_rescan.py b/test/functional/wallet_import_rescan.py index 1986fc3c0..fdbcfc37d 100755 --- a/test/functional/wallet_import_rescan.py +++ b/test/functional/wallet_import_rescan.py @@ -1,204 +1,205 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -''' +"""Test wallet import RPCs. + Test rescan behavior of importaddress, importpubkey, importprivkey, and importmulti RPCs with different types of keys and rescan options. In the first part of the test, node 0 creates an address for each type of import RPC call and node 0 sends BCH to it. Then other nodes import the addresses, and the test makes listtransactions and getbalance calls to confirm that the importing node either did or did not execute rescans picking up the send transactions. In the second part of the test, node 0 sends more BCH to each address, and the test makes more listtransactions and getbalance calls to confirm that the importing nodes pick up the new transactions regardless of whether rescans happened previously. 
-''' +""" import collections import enum import itertools from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, connect_nodes, set_node_times, sync_blocks, ) Call = enum.Enum("Call", "single multi") Data = enum.Enum("Data", "address pub priv") Rescan = enum.Enum("Rescan", "no yes late_timestamp") class Variant(collections.namedtuple("Variant", "call data rescan prune")): """Helper for importing one key and verifying scanned transactions.""" def try_rpc(self, func, *args, **kwargs): if self.expect_disabled: assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs) else: return func(*args, **kwargs) def do_import(self, timestamp): """Call one key import RPC.""" if self.call == Call.single: if self.data == Data.address: response = self.try_rpc(self.node.importaddress, self.address["address"], self.label, self.rescan == Rescan.yes) elif self.data == Data.pub: response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label, self.rescan == Rescan.yes) elif self.data == Data.priv: response = self.try_rpc( self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes) assert_equal(response, None) elif self.call == Call.multi: response = self.node.importmulti([{ "scriptPubKey": { "address": self.address["address"] }, "timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0), "pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [], "keys": [self.key] if self.data == Data.priv else [], "label": self.label, "watchonly": self.data != Data.priv }], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)}) assert_equal(response, [{"success": True}]) def check(self, txid=None, amount=None, confirmations=None): """Verify that getbalance/listtransactions return expected values.""" balance = self.node.getbalance(self.label, 0, True) assert_equal(balance, self.expected_balance) txs = self.node.listtransactions(self.label, 10000, 0, True) assert_equal(len(txs), self.expected_txs) if txid is not None: tx, = [tx for tx in txs if tx["txid"] == txid] assert_equal(tx["label"], self.label) assert_equal(tx["address"], self.address["address"]) assert_equal(tx["amount"], amount) assert_equal(tx["category"], "receive") assert_equal(tx["label"], self.label) assert_equal(tx["txid"], txid) assert_equal(tx["confirmations"], confirmations) assert_equal("trusted" not in tx, True) # Verify the transaction is correctly marked watchonly depending on # whether the transaction pays to an imported public key or # imported private key. The test setup ensures that transaction # inputs will not be from watchonly keys (important because # involvesWatchonly will be true if either the transaction output # or inputs are watchonly). if self.data != Data.priv: assert_equal(tx["involvesWatchonly"], True) else: assert_equal("involvesWatchonly" not in tx, True) # List of Variants for each way a key or address could be imported. IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))] # List of nodes to import keys to. Half the nodes will have pruning disabled, # half will have it enabled. Different nodes will be used for imports that are # expected to cause rescans, and imports that are not expected to cause # rescans, in order to prevent rescans during later imports picking up # transactions associated with earlier imports. This makes it easier to keep # track of expected balances and transactions. 
ImportNode = collections.namedtuple("ImportNode", "prune rescan") IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)] # Rescans start at the earliest block up to 2 hours before the key timestamp. TIMESTAMP_WINDOW = 2 * 60 * 60 class ImportRescanTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 + len(IMPORT_NODES) def setup_network(self): extra_args = [[] for _ in range(self.num_nodes)] for i, import_node in enumerate(IMPORT_NODES, 2): if import_node.prune: extra_args[i] += ["-prune=1"] self.add_nodes(self.num_nodes, extra_args=extra_args) self.start_nodes() for i in range(1, self.num_nodes): connect_nodes(self.nodes[i], self.nodes[0]) def run_test(self): # Create one transaction on node 0 with a unique amount and label for # each possible type of wallet import RPC. for i, variant in enumerate(IMPORT_VARIANTS): variant.label = "label {} {}".format(i, variant) variant.address = self.nodes[1].getaddressinfo( self.nodes[1].getnewaddress(variant.label)) variant.key = self.nodes[1].dumpprivkey(variant.address["address"]) variant.initial_amount = 10 - (i + 1) / 4.0 variant.initial_txid = self.nodes[0].sendtoaddress( variant.address["address"], variant.initial_amount) # Generate a block containing the initial transactions, then another # block further in the future (past the rescan window). self.nodes[0].generate(1) assert_equal(self.nodes[0].getrawmempool(), []) timestamp = self.nodes[0].getblockheader( self.nodes[0].getbestblockhash())["time"] set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1) self.nodes[0].generate(1) sync_blocks(self.nodes) # For each variation of wallet key import, invoke the import RPC and # check the results from getbalance and listtransactions. for variant in IMPORT_VARIANTS: variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled variant.node = self.nodes[ 2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))] variant.do_import(timestamp) if expect_rescan: variant.expected_balance = variant.initial_amount variant.expected_txs = 1 variant.check(variant.initial_txid, variant.initial_amount, 2) else: variant.expected_balance = 0 variant.expected_txs = 0 variant.check() # Create new transactions sending to each address. for i, variant in enumerate(IMPORT_VARIANTS): variant.sent_amount = 10 - (2 * i + 1) / 8.0 variant.sent_txid = self.nodes[0].sendtoaddress( variant.address["address"], variant.sent_amount) # Generate a block containing the new transactions. self.nodes[0].generate(1) assert_equal(self.nodes[0].getrawmempool(), []) sync_blocks(self.nodes) # Check the latest results from getbalance and listtransactions. for variant in IMPORT_VARIANTS: if not variant.expect_disabled: variant.expected_balance += variant.sent_amount variant.expected_txs += 1 variant.check(variant.sent_txid, variant.sent_amount, 1) else: variant.check() if __name__ == "__main__": ImportRescanTest().main() diff --git a/test/functional/wallet_importmulti.py b/test/functional/wallet_importmulti.py index 9e1bdf6b7..ca780abc0 100755 --- a/test/functional/wallet_importmulti.py +++ b/test/functional/wallet_importmulti.py @@ -1,491 +1,491 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- +"""Test the importmulti RPC.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, ) class ImportMultiTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def setup_network(self, split=False): self.setup_nodes() def run_test(self): self.log.info("Mining blocks...") self.nodes[0].generate(1) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] node0_address1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) # Check only one address assert_equal(node0_address1['ismine'], True) # Node 1 sync test assert_equal(self.nodes[1].getblockcount(), 1) # Address Test - before import address_info = self.nodes[1].getaddressinfo(node0_address1['address']) assert_equal(address_info['iswatchonly'], False) assert_equal(address_info['ismine'], False) # RPC importmulti ----------------------------------------------- # Bitcoin Address self.log.info("Should import an address") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) watchonly_address = address['address'] watchonly_timestamp = timestamp self.log.info("Should not import an invalid address") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": "not valid address", }, "timestamp": "now", }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Invalid address') # ScriptPubKey + internal self.log.info("Should import a scriptPubKey with internal flag") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "internal": True }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + !internal self.log.info("Should not import a scriptPubKey without internal flag") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # Address + Public key + !Internal self.log.info("Should import an address with public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "pubkeys": [address['pubkey']] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) 
assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + Public key + internal self.log.info( "Should import a scriptPubKey with internal and with public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "pubkeys": [address['pubkey']], "internal": True }] result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + Public key + !internal self.log.info( "Should not import a scriptPubKey without internal and with public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "pubkeys": [address['pubkey']] }] result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # Address + Private key + !watchonly self.log.info("Should import an address with private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], True) assert_equal(address_assert['timestamp'], timestamp) self.log.info( "Should not import an address with private key if it is already imported") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -4) assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script') # Address + Private key + watchonly self.log.info( "Should not import an address with private key and with watchonly") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])], "watchonly": True }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # ScriptPubKey + Private key + internal self.log.info( "Should import a scriptPubKey with internal and with private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result =
self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])], "internal": True }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], True) assert_equal(address_assert['timestamp'], timestamp) # ScriptPubKey + Private key + !internal self.log.info( "Should not import a scriptPubKey without internal and with private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address['address'])] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # P2SH address sig_address_1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig( 2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress( multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] self.log.info("Should import a p2sh") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "timestamp": "now", }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo( multi_sig_script['address']) assert_equal(address_assert['isscript'], True) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['timestamp'], timestamp) p2shunspent = self.nodes[1].listunspent( 0, 999999, [multi_sig_script['address']])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], False) # P2SH + Redeem script sig_address_1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig( 2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress( multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] self.log.info("Should import a p2sh with respective redeem script") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "timestamp": "now", "redeemscript": multi_sig_script['redeemScript'] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo( multi_sig_script['address']) assert_equal(address_assert['timestamp'], timestamp) p2shunspent = self.nodes[1].listunspent( 0, 999999, [multi_sig_script['address']])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], 
True) # P2SH + Redeem script + Private Keys + !Watchonly sig_address_1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig( 2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress( multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] self.log.info( "Should import a p2sh with respective redeem script and private keys") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "timestamp": "now", "redeemscript": multi_sig_script['redeemScript'], "keys": [self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])] }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo( multi_sig_script['address']) assert_equal(address_assert['timestamp'], timestamp) p2shunspent = self.nodes[1].listunspent( 0, 999999, [multi_sig_script['address']])[0] assert_equal(p2shunspent['spendable'], False) assert_equal(p2shunspent['solvable'], True) # P2SH + Redeem script + Private Keys + Watchonly sig_address_1 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_2 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) sig_address_3 = self.nodes[0].getaddressinfo( self.nodes[0].getnewaddress()) multi_sig_script = self.nodes[0].createmultisig( 2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']]) self.nodes[1].generate(100) transactionid = self.nodes[1].sendtoaddress( multi_sig_script['address'], 10.00) self.nodes[1].generate(1) timestamp = self.nodes[1].getblock( self.nodes[1].getbestblockhash())['mediantime'] self.log.info( "Should not import a p2sh with respective redeem script, private keys and watchonly") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": multi_sig_script['address'] }, "timestamp": "now", "redeemscript": multi_sig_script['redeemScript'], "keys": [self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])], "watchonly": True }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -8) assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys') # Address + Public key + !Internal + Wrong pubkey self.log.info("Should not import an address with a wrong public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "pubkeys": [address2['pubkey']] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # ScriptPubKey + Public key + internal + Wrong pubkey self.log.info( "Should not import a scriptPubKey with internal and with a wrong public key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) request = [{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "pubkeys": [address2['pubkey']], "internal": True }] result = self.nodes[1].importmulti(request) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # Address + Private key + !watchonly + Wrong private key self.log.info("Should not import an address with a wrong private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": address['address'] }, "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address2['address'])] }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # ScriptPubKey + Private key + internal + Wrong private key self.log.info( "Should not import a scriptPubKey with internal and with a wrong private key") address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress()) result = self.nodes[1].importmulti([{ "scriptPubKey": address['scriptPubKey'], "timestamp": "now", "keys": [self.nodes[0].dumpprivkey(address2['address'])], "internal": True }]) assert_equal(result[0]['success'], False) assert_equal(result[0]['error']['code'], -5) assert_equal(result[0]['error']['message'], 'Consistency check failed') address_assert = self.nodes[1].getaddressinfo(address['address']) assert_equal(address_assert['iswatchonly'], False) assert_equal(address_assert['ismine'], False) assert_equal('timestamp' in address_assert, False) # Importing existing watch only address with new timestamp should replace saved timestamp. 
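# (Re-importing an already-watched script should succeed and replace the stored timestamp rather than error; the assertion below first establishes that the current timestamp really is later than the one saved at the original import, so a successful replacement is observable.)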
assert_greater_than(timestamp, watchonly_timestamp) self.log.info("Should replace previously saved watch only timestamp.") result = self.nodes[1].importmulti([{ "scriptPubKey": { "address": watchonly_address, }, "timestamp": "now", }]) assert_equal(result[0]['success'], True) address_assert = self.nodes[1].getaddressinfo(watchonly_address) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], timestamp) watchonly_timestamp = timestamp # restart nodes to check for proper serialization/deserialization of watch only address self.stop_nodes() self.start_nodes() address_assert = self.nodes[1].getaddressinfo(watchonly_address) assert_equal(address_assert['iswatchonly'], True) assert_equal(address_assert['ismine'], False) assert_equal(address_assert['timestamp'], watchonly_timestamp) # Bad or missing timestamps self.log.info("Should throw on invalid or missing timestamp values") assert_raises_rpc_error(-3, 'Missing required timestamp field for key', self.nodes[1].importmulti, [{ "scriptPubKey": address['scriptPubKey'], }]) assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string', self.nodes[1].importmulti, [{ "scriptPubKey": address['scriptPubKey'], "timestamp": "", }]) if __name__ == '__main__': ImportMultiTest().main() diff --git a/test/functional/wallet_importprunedfunds.py b/test/functional/wallet_importprunedfunds.py index bdf024775..24e41230a 100755 --- a/test/functional/wallet_importprunedfunds.py +++ b/test/functional/wallet_importprunedfunds.py @@ -1,122 +1,123 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test the importprunedfunds and removeprunedfunds RPCs.""" from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class ImportPrunedFundsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 2 def run_test(self): self.log.info("Mining blocks...") self.nodes[0].generate(101) self.sync_all() # address address1 = self.nodes[0].getnewaddress() # pubkey address2 = self.nodes[0].getnewaddress() # privkey address3 = self.nodes[0].getnewaddress() # Using privkey address3_privkey = self.nodes[0].dumpprivkey(address3) # Check only one address address_info = self.nodes[0].getaddressinfo(address1) assert_equal(address_info['ismine'], True) self.sync_all() # Node 1 sync test assert_equal(self.nodes[1].getblockcount(), 101) # Address Test - before import address_info = self.nodes[1].getaddressinfo(address1) assert_equal(address_info['iswatchonly'], False) assert_equal(address_info['ismine'], False) address_info = self.nodes[1].getaddressinfo(address2) assert_equal(address_info['iswatchonly'], False) assert_equal(address_info['ismine'], False) address_info = self.nodes[1].getaddressinfo(address3) assert_equal(address_info['iswatchonly'], False) assert_equal(address_info['ismine'], False) # Send funds to self txnid1 = self.nodes[0].sendtoaddress(address1, 0.1) self.nodes[0].generate(1) rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex'] proof1 = self.nodes[0].gettxoutproof([txnid1]) txnid2 = self.nodes[0].sendtoaddress(address2, 0.05) self.nodes[0].generate(1) rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex'] proof2 = self.nodes[0].gettxoutproof([txnid2]) txnid3 = self.nodes[0].sendtoaddress(address3, 0.025) self.nodes[0].generate(1) rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex'] proof3 = self.nodes[0].gettxoutproof([txnid3]) self.sync_all() # Import with no affiliated address assert_raises_rpc_error( -5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1) balance1 = self.nodes[1].getbalance("", 0, True) assert_equal(balance1, Decimal(0)) # Import with affiliated address with no rescan self.nodes[1].importaddress(address2, "add2", False) self.nodes[1].importprunedfunds(rawtxn2, proof2) balance2 = self.nodes[1].getbalance("add2", 0, True) assert_equal(balance2, Decimal('0.05')) # Import with private key with no rescan self.nodes[1].importprivkey( privkey=address3_privkey, label="add3", rescan=False) self.nodes[1].importprunedfunds(rawtxn3, proof3) balance3 = self.nodes[1].getbalance("add3", 0, False) assert_equal(balance3, Decimal('0.025')) balance3 = self.nodes[1].getbalance("*", 0, True) assert_equal(balance3, Decimal('0.075')) # Addresses Test - after import address_info = self.nodes[1].getaddressinfo(address1) assert_equal(address_info['iswatchonly'], False) assert_equal(address_info['ismine'], False) address_info = self.nodes[1].getaddressinfo(address2) assert_equal(address_info['iswatchonly'], True) assert_equal(address_info['ismine'], False) address_info = self.nodes[1].getaddressinfo(address3) assert_equal(address_info['iswatchonly'], False) assert_equal(address_info['ismine'], True) # Remove transactions assert_raises_rpc_error( -8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1) balance1 = self.nodes[1].getbalance("*", 0, True) assert_equal(balance1, Decimal('0.075')) self.nodes[1].removeprunedfunds(txnid2) balance2 = self.nodes[1].getbalance("*", 0, True) assert_equal(balance2, 
Decimal('0.025')) self.nodes[1].removeprunedfunds(txnid3) balance3 = self.nodes[1].getbalance("*", 0, True) assert_equal(balance3, Decimal('0.0')) if __name__ == '__main__': ImportPrunedFundsTest().main() diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py index c6f323045..fa4591ffd 100755 --- a/test/functional/wallet_keypool.py +++ b/test/functional/wallet_keypool.py @@ -1,94 +1,93 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - -# Exercise the wallet keypool, and interaction with wallet encryption/locking +"""Test the wallet keypool and interaction with wallet encryption/locking.""" import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error class KeyPoolTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 def run_test(self): nodes = self.nodes addr_before_encrypting = nodes[0].getnewaddress() addr_before_encrypting_data = nodes[ 0].getaddressinfo(addr_before_encrypting) wallet_info_old = nodes[0].getwalletinfo() assert(addr_before_encrypting_data[ 'hdmasterkeyid'] == wallet_info_old['hdmasterkeyid']) # Encrypt wallet and wait to terminate nodes[0].node_encrypt_wallet('test') # Restart node 0 self.start_node(0) # Keep creating keys addr = nodes[0].getnewaddress() addr_data = nodes[0].getaddressinfo(addr) wallet_info = nodes[0].getwalletinfo() assert(addr_before_encrypting_data[ 'hdmasterkeyid'] != wallet_info['hdmasterkeyid']) assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid']) assert_raises_rpc_error( -12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress) # put six (plus 2) new keys in the keypool (100% external-, +100% internal-keys, 1 in min) nodes[0].walletpassphrase('test', 12000) nodes[0].keypoolrefill(6) nodes[0].walletlock() wi = nodes[0].getwalletinfo() assert_equal(wi['keypoolsize_hd_internal'], 6) assert_equal(wi['keypoolsize'], 6) # drain the internal keys nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() nodes[0].getrawchangeaddress() addr = set() # the next one should fail assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].getrawchangeaddress) # drain the external keys addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) addr.add(nodes[0].getnewaddress()) assert(len(addr) == 6) # the next one should fail assert_raises_rpc_error( -12, "Error: Keypool ran out, please call keypoolrefill first", nodes[0].getnewaddress) # refill keypool with three new addresses nodes[0].walletpassphrase('test', 1) nodes[0].keypoolrefill(3) # test walletpassphrase timeout time.sleep(1.1) assert_equal(nodes[0].getwalletinfo()["unlocked_until"], 0) # drain them by mining nodes[0].generate(1) nodes[0].generate(1) nodes[0].generate(1) assert_raises_rpc_error(-12, "Keypool ran out", nodes[0].generate, 1) nodes[0].walletpassphrase('test', 100) nodes[0].keypoolrefill(100) wi = nodes[0].getwalletinfo() assert_equal(wi['keypoolsize_hd_internal'], 100) assert_equal(wi['keypoolsize'], 100) if __name__ == '__main__': KeyPoolTest().main() diff --git a/test/functional/wallet_listreceivedby.py 
b/test/functional/wallet_listreceivedby.py index f01ca6f92..c90a6d1e9 100755 --- a/test/functional/wallet_listreceivedby.py +++ b/test/functional/wallet_listreceivedby.py @@ -1,168 +1,168 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. - """Test the listreceivedbyaddress RPC.""" + from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_array_result, assert_equal, assert_raises_rpc_error, ) class ReceivedByTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 def run_test(self): # Generate block to get out of IBD self.nodes[0].generate(1) self.log.info("listreceivedbyaddress Test") # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # Check not listed in listreceivedbyaddress because it has 0 confirmations assert_array_result(self.nodes[1].listreceivedbyaddress(), {"address": addr}, {}, True) # Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress self.nodes[1].generate(10) self.sync_all() assert_array_result(self.nodes[1].listreceivedbyaddress(), {"address": addr}, {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) # With minconf < 10 assert_array_result(self.nodes[1].listreceivedbyaddress(5), {"address": addr}, {"address": addr, "label": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) # With minconf > 10, should not find the Tx assert_array_result(self.nodes[1].listreceivedbyaddress(11), { "address": addr}, {}, True) # Empty Tx empty_addr = self.nodes[1].getnewaddress() assert_array_result(self.nodes[1].listreceivedbyaddress(0, True), {"address": empty_addr}, {"address": empty_addr, "label": "", "amount": 0, "confirmations": 0, "txids": []}) # Test Address filtering # Only on addr expected = {"address": addr, "label": "", "amount": Decimal( "0.1"), "confirmations": 10, "txids": [txid, ]} res = self.nodes[1].listreceivedbyaddress( minconf=0, include_empty=True, include_watchonly=True, address_filter=addr) assert_array_result(res, {"address": addr}, expected) assert_equal(len(res), 1) # Error on invalid address assert_raises_rpc_error(-4, "address_filter parameter was invalid", self.nodes[1].listreceivedbyaddress, minconf=0, include_empty=True, include_watchonly=True, address_filter="bamboozling") # Another address receives money res = self.nodes[1].listreceivedbyaddress(0, True, True) assert_equal(len(res), 2) # Right now 2 entries other_addr = self.nodes[1].getnewaddress() txid2 = self.nodes[0].sendtoaddress(other_addr, 0.1) self.nodes[0].generate(1) self.sync_all() # Same test as above should still pass expected = {"address": addr, "label": "", "amount": Decimal( "0.1"), "confirmations": 11, "txids": [txid, ]} res = self.nodes[1].listreceivedbyaddress(0, True, True, addr) assert_array_result(res, {"address": addr}, expected) assert_equal(len(res), 1) # Same test as above but with other_addr should still pass expected = {"address": other_addr, "label": "", "amount": Decimal( "0.1"), "confirmations": 1, "txids": [txid2, ]} res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr) assert_array_result(res, {"address": other_addr}, expected) assert_equal(len(res), 1) # Without the address filter all entries should be returned res = self.nodes[1].listreceivedbyaddress(0,
True, True) assert_equal(len(res), 3) # Became 3 entries # Should find no entries for a random addr # note: created on node[0], just a random addr other_addr = self.nodes[0].getnewaddress() res = self.nodes[1].listreceivedbyaddress(0, True, True, other_addr) assert_equal(len(res), 0) self.log.info("getreceivedbyaddress Test") # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # Check balance is 0 because of 0 confirmations balance = self.nodes[1].getreceivedbyaddress(addr) assert_equal(balance, Decimal("0.0")) # Check balance is 0.1 with minconf=0 balance = self.nodes[1].getreceivedbyaddress(addr, 0) assert_equal(balance, Decimal("0.1")) # Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress self.nodes[1].generate(10) self.sync_all() balance = self.nodes[1].getreceivedbyaddress(addr) assert_equal(balance, Decimal("0.1")) # Trying to getreceivedby for an address the wallet doesn't own should return an error assert_raises_rpc_error(-4, "Address not found in wallet", self.nodes[0].getreceivedbyaddress, addr) self.log.info("listreceivedbylabel + getreceivedbylabel Test") # set pre-state addrArr = self.nodes[1].getnewaddress() label = self.nodes[1].getaccount(addrArr) received_by_label_json = [ r for r in self.nodes[1].listreceivedbylabel() if r["label"] == label][0] balance_by_label = self.nodes[1].getreceivedbylabel(label) txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # listreceivedbylabel should return received_by_label_json because of 0 confirmations assert_array_result(self.nodes[1].listreceivedbylabel(), {"label": label}, received_by_label_json) # getreceivedbylabel should return the same balance because of 0 confirmations balance = self.nodes[1].getreceivedbylabel(label) assert_equal(balance, balance_by_label) self.nodes[1].generate(10) self.sync_all() # listreceivedbylabel should return updated received list assert_array_result(self.nodes[1].listreceivedbylabel(), {"label": label}, {"label": received_by_label_json["label"], "amount": (received_by_label_json["amount"] + Decimal("0.1"))}) # getreceivedbylabel should return updated receive total balance = self.nodes[1].getreceivedbylabel(label) assert_equal(balance, balance_by_label + Decimal("0.1")) # Create a new label named "mynewlabel" that has a 0 balance self.nodes[1].getlabeladdress("mynewlabel") received_by_label_json = [r for r in self.nodes[1].listreceivedbylabel( 0, True) if r["label"] == "mynewlabel"][0] # Test includeempty of listreceivedbylabel assert_equal(received_by_label_json["amount"], Decimal("0.0")) # Test getreceivedbylabel for 0 amount labels balance = self.nodes[1].getreceivedbylabel("mynewlabel") assert_equal(balance, Decimal("0.0")) if __name__ == '__main__': ReceivedByTest().main() diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py index 315474d72..86ff70943 100755 --- a/test/functional/wallet_listsinceblock.py +++ b/test/functional/wallet_listsinceblock.py @@ -1,285 +1,285 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
- +"""Test the listsincelast RPC.""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_array_result, assert_raises_rpc_error class ListSinceBlockTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True self.extra_args = [["-noparkdeepreorg"], ["-noparkdeepreorg"], [], []] def run_test(self): self.nodes[2].generate(101) self.sync_all() self.test_no_blockhash() self.test_invalid_blockhash() self.test_reorg() self.test_double_spend() self.test_double_send() def test_no_blockhash(self): txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) blockhash, = self.nodes[2].generate(1) self.sync_all() txs = self.nodes[0].listtransactions() assert_array_result(txs, {"txid": txid}, { "category": "receive", "amount": 1, "blockhash": blockhash, "confirmations": 1, }) assert_equal( self.nodes[0].listsinceblock(), {"lastblock": blockhash, "removed": [], "transactions": txs}) assert_equal( self.nodes[0].listsinceblock(""), {"lastblock": blockhash, "removed": [], "transactions": txs}) def test_invalid_blockhash(self): assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4") assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "0000000000000000000000000000000000000000000000000000000000000000") assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock, "invalid-hex") def test_reorg(self): ''' `listsinceblock` did not behave correctly when handed a block that was no longer in the main chain: ab0 / \ aa1 [tx0] bb1 | | aa2 bb2 | | aa3 bb3 | bb4 Consider a client that has only seen block `aa3` above. It asks the node to `listsinceblock aa3`. But at some point prior the main chain switched to the bb chain. Previously: listsinceblock would find height=4 for block aa3 and compare this to height=5 for the tip of the chain (bb4). It would then return results restricted to bb3-bb4. Now: listsinceblock finds the fork at ab0 and returns results in the range bb1-bb4. This test only checks that [tx0] is present. ''' # Split network into two self.split_network() # send to nodes[0] from nodes[2] senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1) # generate on both sides lastblockhash = self.nodes[1].generate(6)[5] self.nodes[2].generate(7) self.log.info('lastblockhash={}'.format(lastblockhash)) self.sync_all([self.nodes[:2], self.nodes[2:]]) self.join_network() # listsinceblock(lastblockhash) should now include tx, as seen from # nodes[0] lsbres = self.nodes[0].listsinceblock(lastblockhash) found = False for tx in lsbres['transactions']: if tx['txid'] == senttx: found = True break assert found def test_double_spend(self): ''' This tests the case where the same UTXO is spent twice on two separate blocks as part of a reorg. ab0 / \ aa1 [tx1] bb1 [tx2] | | aa2 bb2 | | aa3 bb3 | bb4 Problematic case: 1. User 1 receives BCH in tx1 from utxo1 in block aa1. 2. User 2 receives BCH in tx2 from utxo1 (same) in block bb1 3. User 1 sees 2 confirmations at block aa3. 4. Reorg into bb chain. 5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now invalidated. Currently the solution to this is to detect that a reorg'd block is asked for in listsinceblock, and to iterate back over existing blocks up until the fork point, and to include all transactions that relate to the node wallet. 
''' self.sync_all() # Split network into two self.split_network() # share utxo between nodes[1] and nodes[2] utxos = self.nodes[2].listunspent() utxo = utxos[0] privkey = self.nodes[2].dumpprivkey(utxo['address']) self.nodes[1].importprivkey(privkey) # send from nodes[1] using utxo to nodes[0] change = '{:.8f}'.format(float(utxo['amount']) - 1.0003) recipientDict = { self.nodes[0].getnewaddress(): 1, self.nodes[1].getnewaddress(): change, } utxoDicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] txid1 = self.nodes[1].sendrawtransaction( self.nodes[1].signrawtransactionwithwallet( self.nodes[1].createrawtransaction(utxoDicts, recipientDict))['hex']) # send from nodes[2] using utxo to nodes[3] recipientDict2 = { self.nodes[3].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } self.nodes[2].sendrawtransaction( self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxoDicts, recipientDict2))['hex']) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(4) self.join_network() self.sync_all() # gettransaction should work for txid1 assert self.nodes[0].gettransaction( txid1)['txid'] == txid1, "gettransaction failed to find txid1" # listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0] lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # but it should not include 'removed' if include_removed=false lsbres2 = self.nodes[0].listsinceblock( blockhash=lastblockhash, include_removed=False) assert 'removed' not in lsbres2 def test_double_send(self): ''' This tests the case where the same transaction is submitted twice on two separate blocks as part of a reorg. The former will vanish and the latter will appear as the true transaction (with confirmations dropping as a result). ab0 / \ aa1 [tx1] bb1 | | aa2 bb2 | | aa3 bb3 [tx1] | bb4 Asserted: 1. tx1 is listed in listsinceblock. 2. It is included in 'removed' as it was removed, even though it is now present in a different block. 3. It is listed with a confirmations count of 2 (bb3, bb4), not 3 (aa1, aa2, aa3). 
''' self.sync_all() # Split network into two self.split_network() # create and sign a transaction utxos = self.nodes[2].listunspent() utxo = utxos[0] change = '{:.8f}'.format(float(utxo['amount']) - 1.0003) recipientDict = { self.nodes[0].getnewaddress(): 1, self.nodes[2].getnewaddress(): change, } utxoDicts = [{ 'txid': utxo['txid'], 'vout': utxo['vout'], }] signedtxres = self.nodes[2].signrawtransactionwithwallet( self.nodes[2].createrawtransaction(utxoDicts, recipientDict)) assert signedtxres['complete'] signedtx = signedtxres['hex'] # send from nodes[1]; this will end up in aa1 txid1 = self.nodes[1].sendrawtransaction(signedtx) # generate bb1-bb2 on right side self.nodes[2].generate(2) # send from nodes[2]; this will end up in bb3 txid2 = self.nodes[2].sendrawtransaction(signedtx) assert_equal(txid1, txid2) # generate on both sides lastblockhash = self.nodes[1].generate(3)[2] self.nodes[2].generate(2) self.join_network() self.sync_all() # gettransaction should work for txid1 self.nodes[0].gettransaction(txid1) # listsinceblock(lastblockhash) should now include txid1 in transactions # as well as in removed lsbres = self.nodes[0].listsinceblock(lastblockhash) assert any(tx['txid'] == txid1 for tx in lsbres['transactions']) assert any(tx['txid'] == txid1 for tx in lsbres['removed']) # find transaction and ensure confirmations is valid for tx in lsbres['transactions']: if tx['txid'] == txid1: assert_equal(tx['confirmations'], 2) # the same check for the removed array; confirmations should STILL be 2 for tx in lsbres['removed']: if tx['txid'] == txid1: assert_equal(tx['confirmations'], 2) if __name__ == '__main__': ListSinceBlockTest().main()
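# Illustrative aside, not part of the tests above: test_reorg and
# test_double_spend describe listsinceblock walking back from a stale block
# to the fork point before collecting wallet transactions. A minimal sketch
# of that walk over the RPC interface, assuming a `node` object like
# self.nodes[0]; find_fork_point is a hypothetical name, not an RPC.
def find_fork_point(node, blockhash):
    # Headers of blocks that are no longer on the active chain report
    # confirmations == -1; step back through previousblockhash until we
    # reach a block the active chain still contains (the fork point, ab0
    # in the diagrams above).
    header = node.getblockheader(blockhash)
    while header['confirmations'] < 0:
        header = node.getblockheader(header['previousblockhash'])
    return header['hash']

# e.g. in test_reorg, find_fork_point(self.nodes[0], lastblockhash) would
# return the hash of the last common block once the split halves rejoin.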