diff --git a/test/functional/feature_abortnode.py b/test/functional/feature_abortnode.py
index 666ab5ac2..ae920c2b1 100755
--- a/test/functional/feature_abortnode.py
+++ b/test/functional/feature_abortnode.py
@@ -1,51 +1,52 @@
 #!/usr/bin/env python3
 # Copyright (c) 2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Test that bitcoind aborts if it can't disconnect a block.

 - Start a single node and generate 3 blocks.
 - Delete the undo data.
 - Mine a fork that requires disconnecting the tip.
 - Verify that bitcoind calls AbortNode.
 """
-from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import get_datadir_path, connect_nodes
 import os

+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import connect_nodes, get_datadir_path
+

 class AbortNodeTest(BitcoinTestFramework):
     def set_test_params(self):
         self.setup_clean_chain = True
         self.num_nodes = 2
         self.extra_args = [["-noparkdeepreorg"], []]

     def setup_network(self):
         self.setup_nodes()
         # We'll connect the nodes later

     def run_test(self):
         self.nodes[0].generate(3)
         datadir = get_datadir_path(self.options.tmpdir, 0)

         # Deleting the undo file will result in reorg failure
         os.unlink(os.path.join(datadir, self.chain, 'blocks', 'rev00000.dat'))

         # Connecting to a node with a more work chain will trigger a reorg
         # attempt.
         self.nodes[1].generate(3)
         with self.nodes[0].assert_debug_log(["Failed to disconnect block"]):
             connect_nodes(self.nodes[0], self.nodes[1])
             self.nodes[1].generate(1)

         # Check that node0 aborted
         self.log.info("Waiting for crash")
         self.nodes[0].wait_until_stopped(timeout=200)
         self.log.info("Node crashed - now verifying restart fails")
         self.nodes[0].assert_start_raises_init_error()


 if __name__ == '__main__':
     AbortNodeTest().main()
diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
index 22e0f92d2..9d90a68ec 100755
--- a/test/functional/feature_assumevalid.py
+++ b/test/functional/feature_assumevalid.py
@@ -1,209 +1,209 @@
 #!/usr/bin/env python3
 # Copyright (c) 2014-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 """Test logic for skipping signature validation on old blocks.

 Test logic for skipping signature validation on blocks which we've assumed
 valid (https://github.com/bitcoin/bitcoin/pull/9484)

 We build a chain that includes an invalid signature for one of the
 transactions:

     0:        genesis block
     1:        block 1 with coinbase transaction output.
     2-101:    bury that block with 100 blocks so the coinbase transaction
               output can be spent
     102:      a block containing a transaction spending the coinbase
               transaction output. The transaction has an invalid signature.
     103-2202: bury the bad block with just over two weeks' worth of blocks
               (2100 blocks)

 Start three nodes:

     - node0 has no -assumevalid parameter. Try to sync to block 2202. It will
       reject block 102 and only sync as far as block 101
     - node1 has -assumevalid set to the hash of block 102. Try to sync to
       block 2202. node1 will sync all the way to block 2202.
     - node2 has -assumevalid set to the hash of block 102. Try to sync to
       block 200. node2 will reject block 102 even though it's assumed valid,
       because it is not yet buried by at least two weeks' worth of work.
 """
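As a quick sanity check on the docstring's "just over two weeks' worth of blocks" claim (an illustrative aside, not part of the test; the 144 blocks/day figure assumes the regular 10-minute block target):

# Rough arithmetic behind the 2100-block burial depth used in this test.
BLOCKS_PER_DAY = 24 * 60 // 10  # 144 at the 10-minute target spacing
TWO_WEEKS_OF_BLOCKS = 14 * BLOCKS_PER_DAY
assert TWO_WEEKS_OF_BLOCKS == 2016
assert 2100 > TWO_WEEKS_OF_BLOCKS  # block 102 is buried "just over" two weeks deep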
""" import time -from test_framework.blocktools import (create_block, create_coinbase) +from test_framework.blocktools import create_block, create_coinbase from test_framework.key import ECKey from test_framework.messages import ( CBlockHeader, COutPoint, CTransaction, CTxIn, CTxOut, msg_block, msg_headers, ) from test_framework.p2p import P2PInterface -from test_framework.script import (CScript, OP_TRUE) +from test_framework.script import OP_TRUE, CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal class BaseNode(P2PInterface): def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) class AssumeValidTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 # Need a bit of extra time when running with the thread sanitizer self.rpc_timeout = 120 def setup_network(self): self.add_nodes(3) # Start node0. We don't start the other nodes yet since # we need to pre-mine a block with an invalid transaction # signature so we can pass in the block hash as assumevalid. self.start_node(0) def send_blocks_until_disconnected(self, p2p_conn): """Keep sending blocks to the node until we're disconnected.""" for i in range(len(self.blocks)): if not p2p_conn.is_connected: break try: p2p_conn.send_message(msg_block(self.blocks[i])) except IOError: assert not p2p_conn.is_connected break def assert_blockchain_height(self, node, height): """Wait until the blockchain is no longer advancing and verify it's reached the expected height.""" last_height = node.getblock(node.getbestblockhash())['height'] timeout = 10 while True: time.sleep(0.25) current_height = node.getblock(node.getbestblockhash())['height'] if current_height != last_height: last_height = current_height if timeout < 0: assert False, "blockchain too short after timeout: {}".format( current_height) timeout - 0.25 continue elif current_height > height: assert False, "blockchain too long: {}".format(current_height) elif current_height == height: break def run_test(self): p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) # Build the blockchain self.tip = int(self.nodes[0].getbestblockhash(), 16) self.block_time = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['time'] + 1 self.blocks = [] # Get a pubkey for the coinbase TXO coinbase_key = ECKey() coinbase_key.generate() coinbase_pubkey = coinbase_key.get_pubkey().get_bytes() # Create the first block with a coinbase output to our key height = 1 block = create_block(self.tip, create_coinbase( height, coinbase_pubkey), self.block_time) self.blocks.append(block) self.block_time += 1 block.solve() # Save the coinbase for later self.block1 = block self.tip = block.sha256 height += 1 # Bury the block 100 deep so the coinbase output is spendable for _ in range(100): block = create_block( self.tip, create_coinbase(height), self.block_time) block.solve() self.blocks.append(block) self.tip = block.sha256 self.block_time += 1 height += 1 # Create a transaction spending the coinbase output with an invalid # (null) signature tx = CTransaction() tx.vin.append( CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE]))) pad_tx(tx) tx.calc_sha256() block102 = create_block( self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block102.vtx.extend([tx]) 
block102.hashMerkleRoot = block102.calc_merkle_root() block102.rehash() block102.solve() self.blocks.append(block102) self.tip = block102.sha256 self.block_time += 1 height += 1 # Bury the assumed valid block 2100 deep for _ in range(2100): block = create_block( self.tip, create_coinbase(height), self.block_time) block.nVersion = 4 block.solve() self.blocks.append(block) self.tip = block.sha256 self.block_time += 1 height += 1 self.nodes[0].disconnect_p2ps() # Start node1 and node2 with assumevalid so they accept a block with a # bad signature. self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)]) self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)]) p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) p2p1 = self.nodes[1].add_p2p_connection(BaseNode()) p2p2 = self.nodes[2].add_p2p_connection(BaseNode()) # send header lists to all three nodes p2p0.send_header_for_blocks(self.blocks[0:2000]) p2p0.send_header_for_blocks(self.blocks[2000:]) p2p1.send_header_for_blocks(self.blocks[0:2000]) p2p1.send_header_for_blocks(self.blocks[2000:]) p2p2.send_header_for_blocks(self.blocks[0:200]) # Send blocks to node0. Block 102 will be rejected. self.send_blocks_until_disconnected(p2p0) self.assert_blockchain_height(self.nodes[0], 101) # Send all blocks to node1. All blocks will be accepted. for i in range(2202): p2p1.send_message(msg_block(self.blocks[i])) # Syncing 2200 blocks can take a while on slow systems. Give it plenty # of time to sync. p2p1.sync_with_ping(960) assert_equal(self.nodes[1].getblock( self.nodes[1].getbestblockhash())['height'], 2202) # Send blocks to node2. Block 102 will be rejected. self.send_blocks_until_disconnected(p2p2) self.assert_blockchain_height(self.nodes[2], 101) if __name__ == '__main__': AssumeValidTest().main() diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py index cb60cee2a..52da535ef 100755 --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -1,509 +1,506 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
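A note on the header batches in the assumevalid test above: headers are deliberately sent in slices of at most 2000 blocks because a single P2P `headers` message carries at most 2000 entries (MAX_HEADERS_RESULTS on the C++ side). A sketch of the pattern, where `send_all_headers` is a hypothetical helper and `send_header_for_blocks` is the BaseNode method defined in that file:

# Hypothetical helper showing why self.blocks is sliced into chunks of 2000.
def send_all_headers(p2p_conn, blocks, batch_size=2000):
    for start in range(0, len(blocks), batch_size):
        p2p_conn.send_header_for_blocks(blocks[start:start + batch_size])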
"""Test BIP68 implementation.""" import time -from test_framework.blocktools import ( - create_block, - create_coinbase, -) +from test_framework.blocktools import create_block, create_coinbase from test_framework.messages import ( XEC, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex, ) from test_framework.script import CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, satoshi_round, ) SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31) # this means use time (0 means height) SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22) # this is a bit-shift SEQUENCE_LOCKTIME_GRANULARITY = 9 SEQUENCE_LOCKTIME_MASK = 0x0000ffff # RPC error for non-BIP68 final transactions NOT_FINAL_ERROR = "non-BIP68-final" class BIP68Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [ [ "-noparkdeepreorg", "-maxreorgdepth=-1", "-acceptnonstdtxn=1", # bump because mocktime might cause a disconnect otherwise "-peertimeout=9999", ], [ "-acceptnonstdtxn=0", "-maxreorgdepth=-1" ] ] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] # Generate some coins self.nodes[0].generate(110) self.log.info("Running test disable flag") self.test_disable_flag() self.log.info("Running test sequence-lock-confirmed-inputs") self.test_sequence_lock_confirmed_inputs() self.log.info("Running test sequence-lock-unconfirmed-inputs") self.test_sequence_lock_unconfirmed_inputs() self.log.info( "Running test BIP68 not consensus before versionbits activation") self.test_bip68_not_consensus() self.log.info("Activating BIP68 (and 112/113)") self.activateCSV() print("Verifying nVersion=2 transactions are standard.") print("Note that with current versions of bitcoin software, nVersion=2 transactions are always standard (independent of BIP68 activation status).") self.test_version2_relay() self.log.info("Passed") # Test that BIP68 is not in effect if tx version is 1, or if # the first sequence bit is set. def test_disable_flag(self): # Create some unconfirmed inputs new_addr = self.nodes[0].getnewaddress() # send 2,000,000 XEC self.nodes[0].sendtoaddress(new_addr, 2000000) utxos = self.nodes[0].listunspent(0, 0) assert len(utxos) > 0 utxo = utxos[0] tx1 = CTransaction() value = int(satoshi_round(utxo["amount"] - self.relayfee) * XEC) # Check that the disable flag disables relative locktime. # If sequence locks were used, this would require 1 block for the # input to mature. sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 tx1.vin = [ CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] tx1.vout = [CTxOut(value, CScript([b'a']))] pad_tx(tx1) tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))[ "hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) # This transaction will enable sequence-locks, so this transaction should # fail tx2 = CTransaction() tx2.nVersion = 2 sequence_value = sequence_value & 0x7fffffff tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] tx2.vout = [CTxOut(int(value - self.relayfee * XEC), CScript([b'a']))] pad_tx(tx2) tx2.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. 
         # Setting the version back down to 1 should disable the sequence
         # lock, so this should be accepted.
         tx2.nVersion = 1

         self.nodes[0].sendrawtransaction(ToHex(tx2))

     # Calculate the median time past of a prior block ("confirmations" before
     # the current tip).
     def get_median_time_past(self, confirmations):
         block_hash = self.nodes[0].getblockhash(
             self.nodes[0].getblockcount() - confirmations)
         return self.nodes[0].getblockheader(block_hash)["mediantime"]

     # Test that sequence locks are respected for transactions spending
     # confirmed inputs.
     def test_sequence_lock_confirmed_inputs(self):
         # Create lots of confirmed utxos, and use them to generate lots of
         # random transactions.
         max_outputs = 50
         addresses = []
         while len(addresses) < max_outputs:
             addresses.append(self.nodes[0].getnewaddress())

         while len(self.nodes[0].listunspent()) < 200:
             random.shuffle(addresses)
             num_outputs = random.randint(1, max_outputs)
             outputs = {}
             for i in range(num_outputs):
                 outputs[addresses[i]] = random.randint(1, 20) * 10000
             self.nodes[0].sendmany("", outputs)
             self.nodes[0].generate(1)

         utxos = self.nodes[0].listunspent()

         # Try creating a lot of random transactions.
         # Each time, choose a random number of inputs, and randomly set
         # some of those inputs to be sequence locked (and randomly choose
         # between height/time locking). Small random chance of making the
         # locks all pass.
         for _ in range(400):
             # Randomly choose up to 10 inputs
             num_inputs = random.randint(1, 10)
             random.shuffle(utxos)

             # Track whether any sequence locks used should fail
             should_pass = True

             # Track whether this transaction was built with sequence locks
             using_sequence_locks = False

             tx = CTransaction()
             tx.nVersion = 2
             value = 0
             for j in range(num_inputs):
                 # this disables sequence locks
                 sequence_value = 0xfffffffe

                 # 50% chance we enable sequence locks
                 if random.randint(0, 1):
                     using_sequence_locks = True

                     # 10% of the time, make the input sequence value pass
                     input_will_pass = (random.randint(1, 10) == 1)
                     sequence_value = utxos[j]["confirmations"]
                     if not input_will_pass:
                         sequence_value += 1
                         should_pass = False

                     # Figure out what the median-time-past was for the
                     # confirmed input. Note that if an input has N
                     # confirmations, we're going back N blocks from the tip
                     # so that we're looking up MTP of the block PRIOR to the
                     # one the input appears in, as per the BIP68 spec.
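For reference, the "mediantime" that get_median_time_past() reads back over RPC is the median of the previous 11 block timestamps (the MTP of BIP113). A sketch of how it could be recomputed client-side, under the assumption that walking getblockheader() suffices; `median_time_past` is a hypothetical helper, the test itself just trusts the RPC field:

# Hypothetical re-derivation of a block's median time past.
def median_time_past(node, block_hash, span=11):
    times = []
    for _ in range(span):
        header = node.getblockheader(block_hash)
        times.append(header["time"])
        block_hash = header.get("previousblockhash")
        if block_hash is None:  # ran into the genesis block
            break
    times.sort()
    return times[len(times) // 2]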
orig_time = self.get_median_time_past( utxos[j]["confirmations"]) # MTP of the tip cur_time = self.get_median_time_past(0) # can only timelock this input if it's not too old -- # otherwise use height can_time_lock = True if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: can_time_lock = False # if time-lockable, then 50% chance we make this a time # lock if random.randint(0, 1) and can_time_lock: # Find first time-lock value that fails, or latest one # that succeeds time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY if input_will_pass and time_delta > cur_time - orig_time: sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) elif (not input_will_pass and time_delta <= cur_time - orig_time): sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + 1 sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx.vin.append( CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) value += utxos[j]["amount"] * XEC # Overestimate the size of the tx - signatures should be less than # 120 bytes, and leave 50 for the output tx_size = len(ToHex(tx)) // 2 + 120 * num_inputs + 50 tx.vout.append( CTxOut(int(value - self.relayfee * tx_size * XEC / 1000), CScript([b'a']))) rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))[ "hex"] if (using_sequence_locks and not should_pass): # This transaction should be rejected assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) else: # This raw transaction should be accepted self.nodes[0].sendrawtransaction(rawtx) utxos = self.nodes[0].listunspent() # Test that sequence locks on unconfirmed inputs must have nSequence # height or time of 0 to be accepted. # Then test that BIP68-invalid transactions are removed from the mempool # after a reorg. def test_sequence_lock_unconfirmed_inputs(self): # Store height so we can easily reset the chain at the end of the test cur_height = self.nodes[0].getblockcount() # Create a mempool tx. txid = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), 2000000) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # As the fees are calculated prior to the transaction being signed, # there is some uncertainty that calculate fee provides the correct # minimal fee. Since regtest coins are free, let's go ahead and # increase the fee by an order of magnitude to ensure this test # passes. fee_multiplier = 10 # Anyone-can-spend mempool tx. # Sequence lock of 0 should pass. tx2 = CTransaction() tx2.nVersion = 2 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(0), CScript([b'a']))] tx2.vout[0].nValue = tx1.vout[0].nValue - \ fee_multiplier * self.nodes[0].calculate_fee(tx2) tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(tx2_raw) # Create a spend of the 0th output of orig_tx with a sequence lock # of 1, and test what happens when submitting. 
# orig_tx.vout[0] must be an anyone-can-spend output def test_nonzero_locks(orig_tx, node, use_height_lock): sequence_value = 1 if not use_height_lock: sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx = CTransaction() tx.nVersion = 2 tx.vin = [ CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] tx.vout = [ CTxOut(int(orig_tx.vout[0].nValue - fee_multiplier * node.calculate_fee(tx)), CScript([b'a']))] pad_tx(tx) tx.rehash() if (orig_tx.hash in node.getrawmempool()): # sendrawtransaction should fail if the tx is in the mempool assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) else: # sendrawtransaction should succeed if the tx is not in the # mempool node.sendrawtransaction(ToHex(tx)) return tx test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Now mine some blocks, but make sure tx2 doesn't get mined. # Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction( txid=tx2.hash, fee_delta=-fee_multiplier * self.nodes[0].calculate_fee(tx2)) cur_time = int(time.time()) for _ in range(10): self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) cur_time += 600 assert tx2.hash in self.nodes[0].getrawmempool() test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Mine tx2, and then try again self.nodes[0].prioritisetransaction( txid=tx2.hash, fee_delta=fee_multiplier * self.nodes[0].calculate_fee(tx2)) # Advance the time on the node so that we can test timelocks self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) assert tx2.hash not in self.nodes[0].getrawmempool() # Now that tx2 is not in the mempool, a sequence locked spend should # succeed tx3 = test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) assert tx3.hash in self.nodes[0].getrawmempool() self.nodes[0].generate(1) assert tx3.hash not in self.nodes[0].getrawmempool() # One more test, this time using height locks tx4 = test_nonzero_locks( tx3, self.nodes[0], use_height_lock=True) assert tx4.hash in self.nodes[0].getrawmempool() # Now try combining confirmed and unconfirmed inputs tx5 = test_nonzero_locks( tx4, self.nodes[0], use_height_lock=True) assert tx5.hash not in self.nodes[0].getrawmempool() utxos = self.nodes[0].listunspent() tx5.vin.append( CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) tx5.vout[0].nValue += int(utxos[0]["amount"] * XEC) raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"] assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) # Test mempool-BIP68 consistency after reorg # # State of the transactions in the last blocks: # ... -> [ tx2 ] -> [ tx3 ] # tip-1 tip # And currently tx4 is in the mempool. # # If we invalidate the tip, tx3 should get added to the mempool, causing # tx4 to be removed (fails sequence-lock). self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) assert tx4.hash not in self.nodes[0].getrawmempool() assert tx3.hash in self.nodes[0].getrawmempool() # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in # diagram above). # This would cause tx2 to be added back to the mempool, which in turn causes # tx3 to be removed. 
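A note on the submitblock return values asserted in the loop below (BIP22 semantics; `node` and `block_hex` are stand-ins for the test's actual values):

# Sketch of the two outcomes the loop below expects from submitblock:
result = node.submitblock(block_hex)
if result is None:
    pass  # block connected and became the new tip (the i == 1 case)
elif result == 'inconclusive':
    pass  # valid, but on a side chain that is not yet the most-work chain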
tip = int(self.nodes[0].getblockhash( self.nodes[0].getblockcount() - 1), 16) height = self.nodes[0].getblockcount() for i in range(2): block = create_block(tip, create_coinbase(height), cur_time) block.nVersion = 3 block.rehash() block.solve() tip = block.sha256 height += 1 assert_equal( None if i == 1 else 'inconclusive', self.nodes[0].submitblock( ToHex(block))) cur_time += 1 mempool = self.nodes[0].getrawmempool() assert tx3.hash not in mempool assert tx2.hash in mempool # Reset the chain and get rid of the mocktimed-blocks self.nodes[0].setmocktime(0) self.nodes[0].invalidateblock( self.nodes[0].getblockhash(cur_height + 1)) self.nodes[0].generate(10) def get_csv_status(self): height = self.nodes[0].getblockchaininfo()['blocks'] return height >= 576 # Make sure that BIP68 isn't being used to validate blocks, prior to # versionbits activation. If more blocks are mined prior to this test # being run, then it's possible the test has activated the soft fork, and # this test should be moved to run earlier, or deleted. def test_bip68_not_consensus(self): assert_equal(self.get_csv_status(), False) txid = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), 2000000) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Make an anyone-can-spend transaction tx2 = CTransaction() tx2.nVersion = 1 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(tx1.vout[0].nValue - self.relayfee * XEC), CScript([b'a']))] # sign tx2 tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) pad_tx(tx2) tx2.rehash() self.nodes[0].sendrawtransaction(ToHex(tx2)) # Now make an invalid spend of tx2 according to BIP68 # 100 block relative locktime sequence_value = 100 tx3 = CTransaction() tx3.nVersion = 2 tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] tx3.vout = [ CTxOut(int(tx2.vout[0].nValue - self.relayfee * XEC), CScript([b'a']))] pad_tx(tx3) tx3.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) # make a block that violates bip68; ensure that the tip updates tip = int(self.nodes[0].getbestblockhash(), 16) block = create_block( tip, create_coinbase(self.nodes[0].getblockcount() + 1)) block.nVersion = 3 block.vtx.extend( sorted([tx1, tx2, tx3], key=lambda tx: tx.get_id())) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() assert_equal(None, self.nodes[0].submitblock(ToHex(block))) assert_equal(self.nodes[0].getbestblockhash(), block.hash) def activateCSV(self): # activation should happen at block height 576 csv_activation_height = 576 height = self.nodes[0].getblockcount() assert_greater_than(csv_activation_height - height, 1) self.nodes[0].generate(csv_activation_height - height - 1) assert_equal(self.get_csv_status(), False) disconnect_nodes(self.nodes[0], self.nodes[1]) self.nodes[0].generate(1) assert_equal(self.get_csv_status(), True) # We have a block that has CSV activated, but we want to be at # the activation point, so we invalidate the tip. self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) connect_nodes(self.nodes[0], self.nodes[1]) self.sync_blocks() # Use self.nodes[1] to test that version 2 transactions are standard. 
def test_version2_relay(self): inputs = [] outputs = {self.nodes[1].getnewaddress(): 1000000.0} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] tx = FromHex(CTransaction(), rawtxfund) tx.nVersion = 2 tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))[ "hex"] self.nodes[1].sendrawtransaction(tx_signed) if __name__ == '__main__': BIP68Test().main() diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 36390ca55..e0b8376e3 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -1,1304 +1,1303 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2017 The Bitcoin Core developers # Copyright (c) 2019 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test block processing.""" import copy import struct import time +from data import invalid_txs from test_framework.blocktools import ( create_block, create_coinbase, create_tx_with_script, make_conform_to_ctor, ) from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE from test_framework.key import ECKey from test_framework.messages import ( - CBlock, COIN, + CBlock, COutPoint, CTransaction, CTxIn, CTxOut, uint256_from_compact, uint256_from_str, ) from test_framework.p2p import P2PDataStore from test_framework.script import ( - CScript, OP_ELSE, OP_ENDIF, OP_FALSE, OP_IF, OP_INVALIDOPCODE, OP_RETURN, OP_TRUE, SIGHASH_ALL, SIGHASH_FORKID, + CScript, SignatureHashForkId, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal -from data import invalid_txs - # Use this class for tests that require behavior other than normal p2p behavior. # For now, it is used to serialize a bloated varint (b64). class CBrokenBlock(CBlock): def initialize(self, base_block): self.vtx = copy.deepcopy(base_block.vtx) self.hashMerkleRoot = self.calc_merkle_root() def serialize(self): r = b"" r += super(CBlock, self).serialize() r += struct.pack(" b1 (0) -> b2 (1) b1 = self.next_block(1, spend=out[0]) self.save_spendable_output() b2 = self.next_block(2, spend=out[1]) self.save_spendable_output() self.send_blocks([b1, b2], timeout=4) # Select a txn with an output eligible for spending. This won't actually be spent, # since we're testing submission of a series of blocks with invalid # txns. attempt_spend_tx = out[2] # Submit blocks for rejection, each of which contains a single transaction # (aside from coinbase) which should be considered invalid. for TxTemplate in invalid_txs.iter_all_templates(): template = TxTemplate(spend_tx=attempt_spend_tx) if template.valid_in_block: continue self.log.info( "Reject block with invalid tx: %s", TxTemplate.__name__) blockname = "for_invalid.{}".format(TxTemplate.__name__) badblock = self.next_block(blockname) badtx = template.get_tx() if TxTemplate != invalid_txs.InputMissing: self.sign_tx(badtx, attempt_spend_tx) badtx.rehash() badblock = self.update_block(blockname, [badtx]) self.send_blocks( [badblock], success=False, reject_reason=( template.block_reject_reason or template.reject_reason), reconnect=True, timeout=2) self.move_tip(2) # Fork like this: # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) # # Nothing should happen at this point. We saw b2 first so it takes # priority. 
self.log.info("Don't reorg to a chain of the same length") self.move_tip(1) b3 = self.next_block(3, spend=out[1]) txout_b3 = b3.vtx[1] self.send_blocks([b3], False) # Now we add another block to make the alternative chain longer. # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) -> b4 (2) self.log.info("Reorg to a longer chain") b4 = self.next_block(4, spend=out[2]) self.send_blocks([b4]) # ... and back to the first chain. # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b3 (1) -> b4 (2) self.move_tip(2) b5 = self.next_block(5, spend=out[2]) self.save_spendable_output() self.send_blocks([b5], False) self.log.info("Reorg back to the original chain") b6 = self.next_block(6, spend=out[3]) self.send_blocks([b6], True) # Try to create a fork that double-spends # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a chain with a double spend, even if it is longer") self.move_tip(5) b7 = self.next_block(7, spend=out[2]) self.send_blocks([b7], False) b8 = self.next_block(8, spend=out[4]) self.send_blocks([b8], False, reconnect=True) # Try to create a block that has too much fee # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block where the miner creates too much coinbase reward") self.move_tip(6) b9 = self.next_block(9, spend=out[4], additional_coinbase_value=1) self.send_blocks([b9], success=False, reject_reason='bad-cb-amount', reconnect=True) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a chain where the miner creates too much coinbase reward, even if the chain is longer") self.move_tip(5) b10 = self.next_block(10, spend=out[3]) self.send_blocks([b10], False) b11 = self.next_block(11, spend=out[4], additional_coinbase_value=1) self.send_blocks([b11], success=False, reject_reason='bad-cb-amount', reconnect=True) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a chain where the miner creates too much coinbase reward, even if the chain is longer (on a forked chain)") self.move_tip(5) b12 = self.next_block(12, spend=out[3]) self.save_spendable_output() b13 = self.next_block(13, spend=out[4]) self.save_spendable_output() b14 = self.next_block(14, spend=out[5], additional_coinbase_value=1) self.send_blocks([b12, b13, b14], success=False, reject_reason='bad-cb-amount', reconnect=True) # New tip should be b13. 
assert_equal(node.getbestblockhash(), b13.hash) self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py self.move_tip(13) b15 = self.next_block(15) self.save_spendable_output() self.send_blocks([b15], True) # Attempt to spend a transaction created on a different fork # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) # \-> b3 (1) -> b4 (2) self.log.info("Reject a block with a spend from a re-org'ed out tx") self.move_tip(15) b17 = self.next_block(17, spend=txout_b3) self.send_blocks([b17], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block with a spend from a re-org'ed out tx (on a forked chain)") self.move_tip(13) b18 = self.next_block(18, spend=txout_b3) self.send_blocks([b18], False) b19 = self.next_block(19, spend=out[6]) self.send_blocks([b19], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) # \-> b3 (1) -> b4 (2) self.log.info("Reject a block spending an immature coinbase.") self.move_tip(15) b20 = self.next_block(20, spend=out[7]) self.send_blocks( [b20], success=False, reject_reason='bad-txns-premature-spend-of-coinbase', reconnect=True) # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block spending an immature coinbase (on a forked chain)") self.move_tip(13) b21 = self.next_block(21, spend=out[6]) self.send_blocks([b21], False) b22 = self.next_block(22, spend=out[5]) self.send_blocks( [b22], success=False, reject_reason='bad-txns-premature-spend-of-coinbase', reconnect=True) # Create a block on either side of LEGACY_MAX_BLOCK_SIZE and make sure its accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) self.log.info("Accept a block of size LEGACY_MAX_BLOCK_SIZE") self.move_tip(15) b23 = self.next_block(23, spend=out[6]) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b23.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) b23 = self.update_block(23, [tx]) # Make sure the math above worked out to produce a max-sized block assert_equal(len(b23.serialize()), LEGACY_MAX_BLOCK_SIZE) self.send_blocks([b23], True) self.save_spendable_output() # Create blocks with a coinbase input script size out of range # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... (7) # \-> b3 (1) -> b4 (2) self.log.info( "Reject a block with coinbase input script size out of range") self.move_tip(15) b26 = self.next_block(26, spend=out[6]) b26.vtx[0].vin[0].scriptSig = b'\x00' b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. 
b26 = self.update_block(26, []) self.send_blocks([b26], success=False, reject_reason='bad-cb-length', reconnect=True) # Extend the b26 chain to make sure bitcoind isn't accepting b26 b27 = self.next_block(27, spend=out[7]) self.send_blocks([b27], False) # Now try a too-large-coinbase script self.move_tip(15) b28 = self.next_block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() b28 = self.update_block(28, []) self.send_blocks([b28], success=False, reject_reason='bad-cb-length', reconnect=True) # Extend the b28 chain to make sure bitcoind isn't accepting b28 b29 = self.next_block(29, spend=out[7]) self.send_blocks([b29], False) # b30 has a max-sized coinbase scriptSig. self.move_tip(23) b30 = self.next_block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() b30 = self.update_block(30, []) self.send_blocks([b30], True) self.save_spendable_output() self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py b31 = self.next_block(31) self.save_spendable_output() b33 = self.next_block(33) self.save_spendable_output() b35 = self.next_block(35) self.save_spendable_output() self.send_blocks([b31, b33, b35], True) # Check spending of a transaction in a block which failed to connect # # b6 (3) # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b37 (11) # \-> b38 (11/37) # # save 37's spendable output, but then double-spend out11 to invalidate # the block self.log.info( "Reject a block spending transaction from a block which failed to connect") self.move_tip(35) b37 = self.next_block(37, spend=out[11]) txout_b37 = b37.vtx[1] tx = self.create_and_sign_transaction(out[11], 0) b37 = self.update_block(37, [tx]) self.send_blocks([b37], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # attempt to spend b37's first non-coinbase tx, at which point b37 was # still considered valid self.move_tip(35) b38 = self.next_block(38, spend=txout_b37) self.send_blocks([b38], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) self.log.info("Skipped sigops tests") # tests were moved to feature_block_sigops.py self.move_tip(35) b39 = self.next_block(39) self.save_spendable_output() b41 = self.next_block(41) self.send_blocks([b39, b41], True) # Fork off of b39 to create a constant base again # # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) # \-> b41 (12) # self.move_tip(39) b42 = self.next_block(42, spend=out[12]) self.save_spendable_output() b43 = self.next_block(43, spend=out[13]) self.save_spendable_output() self.send_blocks([b42, b43], True) # Test a number of really invalid scenarios # # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) # \-> ??? (15) # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to # make sure this works. 
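Before the hand-built blocks that follow: they hard-code nBits = 0x207fffff, the regtest proof-of-work limit. How that compact encoding expands into a target is worth seeing once; a sketch mirroring the test framework's uint256_from_compact (illustrative, with the sign bit ignored):

# Decode a compact "nBits" value into a proof-of-work target.
def compact_to_target(nbits):
    exponent = nbits >> 24
    mantissa = nbits & 0x007fffff
    return mantissa * (1 << (8 * (exponent - 3)))

# compact_to_target(0x207fffff) == 2**255 - 2**232, i.e. just below 2**255,
# so virtually any solved header hash satisfies it - which is what makes
# regtest mining cheap enough for these tests.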
self.log.info("Build block 44 manually") height = self.block_heights[self.tip.sha256] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() b44.nTime = self.tip.nTime + 1 b44.hashPrevBlock = self.tip.sha256 b44.nBits = 0x207fffff b44.vtx.append(coinbase) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() self.tip = b44 self.block_heights[b44.sha256] = height self.blocks[44] = b44 self.send_blocks([b44], True) self.log.info("Reject a block with a non-coinbase as the first tx") non_coinbase = self.create_tx(out[15], 0, 1) b45 = CBlock() b45.nTime = self.tip.nTime + 1 b45.hashPrevBlock = self.tip.sha256 b45.nBits = 0x207fffff b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() self.block_heights[b45.sha256] = self.block_heights[ self.tip.sha256] + 1 self.tip = b45 self.blocks[45] = b45 self.send_blocks([b45], success=False, reject_reason='bad-cb-missing', reconnect=True) self.log.info("Reject a block with no transactions") self.move_tip(44) b46 = CBlock() b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1 self.tip = b46 assert 46 not in self.blocks self.blocks[46] = b46 self.send_blocks([b46], success=False, reject_reason='bad-cb-missing', reconnect=True) self.log.info("Reject a block with invalid work") self.move_tip(44) b47 = self.next_block(47) target = uint256_from_compact(b47.nBits) while b47.sha256 <= target: # Rehash nonces until an invalid too-high-hash block is found. b47.nNonce += 1 b47.rehash() self.send_blocks( [b47], False, force_send=True, reject_reason='high-hash', reconnect=True) self.log.info("Reject a block with a timestamp >2 hours in the future") self.move_tip(44) b48 = self.next_block(48) b48.nTime = int(time.time()) + 60 * 60 * 3 # Header timestamp has changed. Re-solve the block. 
b48.solve() self.send_blocks([b48], False, force_send=True, reject_reason='time-too-new') self.log.info("Reject a block with invalid merkle hash") self.move_tip(44) b49 = self.next_block(49) b49.hashMerkleRoot += 1 b49.solve() self.send_blocks([b49], success=False, reject_reason='bad-txnmrklroot', reconnect=True) self.log.info("Reject a block with incorrect POW limit") self.move_tip(44) b50 = self.next_block(50) b50.nBits = b50.nBits - 1 b50.solve() self.send_blocks( [b50], False, force_send=True, reject_reason='bad-diffbits', reconnect=True) self.log.info("Reject a block with two coinbase transactions") self.move_tip(44) b51 = self.next_block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) b51 = self.update_block(51, [cb2]) self.send_blocks([b51], success=False, reject_reason='bad-tx-coinbase', reconnect=True) self.log.info("Reject a block with duplicate transactions") self.move_tip(44) b52 = self.next_block(52, spend=out[15]) b52 = self.update_block(52, [b52.vtx[1]]) self.send_blocks([b52], success=False, reject_reason='tx-duplicate', reconnect=True) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) # self.move_tip(43) b53 = self.next_block(53, spend=out[14]) self.send_blocks([b53], False) self.save_spendable_output() self.log.info("Reject a block with timestamp before MedianTimePast") b54 = self.next_block(54, spend=out[15]) b54.nTime = b35.nTime - 1 b54.solve() self.send_blocks( [b54], False, force_send=True, reject_reason='time-too-old', reconnect=True) # valid timestamp self.move_tip(53) b55 = self.next_block(55, spend=out[15]) b55.nTime = b35.nTime self.update_block(55, []) self.send_blocks([b55], True) self.save_spendable_output() # Test Merkle tree malleability # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) # \-> b57 (16) # \-> b56p2 (16) # \-> b56 (16) # # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without # affecting the merkle root of a block, while still invalidating it. # See: src/consensus/merkle.h # # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx. # Result: OK # # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle # root but duplicate transactions. # Result: Fails # # b57p2 has six transactions in its merkle tree: # - coinbase, tx, tx1, tx2, tx3, tx4 # Merkle root calculation will duplicate as necessary. # Result: OK. # # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates # that the error was caught early, avoiding a DOS vulnerability.) 
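The CVE-2012-2459 malleability described above comes from Bitcoin's merkle computation duplicating the last hash at any odd-sized level, so a block with transactions [c, t1, t2] and a forged one with [c, t1, t2, t2] share a merkle root. A self-contained sketch of that property (illustrative, not the framework's implementation):

import hashlib

def dsha256(b):
    return hashlib.sha256(hashlib.sha256(b).digest()).digest()

# Bitcoin-style merkle root: odd levels duplicate their last entry.
def merkle_root(leaves):
    hashes = list(leaves)
    while len(hashes) > 1:
        if len(hashes) % 2 == 1:
            hashes.append(hashes[-1])
        hashes = [dsha256(hashes[i] + hashes[i + 1])
                  for i in range(0, len(hashes), 2)]
    return hashes[0]

# The malleability exercised by b56/b57: an odd leaf count and an explicit
# duplicate of the last leaf produce the same root.
leaves = [dsha256(bytes([i])) for i in range(3)]
assert merkle_root(leaves) == merkle_root(leaves + leaves[-1:])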
# b57 - a good block with 2 txs, don't submit until end self.move_tip(55) b57 = self.next_block(57) tx = self.create_and_sign_transaction(out[16], 1) tx1 = self.create_tx(tx, 0, 1) b57 = self.update_block(57, [tx, tx1]) # b56 - copy b57, add a duplicate tx self.log.info( "Reject a block with a duplicate transaction in the Merkle Tree (but with a valid Merkle Root)") self.move_tip(55) b56 = copy.deepcopy(b57) self.blocks[56] = b56 assert_equal(len(b56.vtx), 3) b56 = self.update_block(56, [b57.vtx[2]]) assert_equal(b56.hash, b57.hash) self.send_blocks([b56], success=False, reject_reason='bad-txns-duplicate', reconnect=True) # b57p2 - a good block with 6 tx'es, don't submit until end self.move_tip(55) b57p2 = self.next_block("57p2") tx = self.create_and_sign_transaction(out[16], 1) tx1 = self.create_tx(tx, 0, 1) tx2 = self.create_tx(tx1, 0, 1) tx3 = self.create_tx(tx2, 0, 1) tx4 = self.create_tx(tx3, 0, 1) b57p2 = self.update_block("57p2", [tx, tx1, tx2, tx3, tx4]) # b56p2 - copy b57p2, duplicate two non-consecutive tx's self.log.info( "Reject a block with two duplicate transactions in the Merkle Tree (but with a valid Merkle Root)") self.move_tip(55) b56p2 = copy.deepcopy(b57p2) self.blocks["b56p2"] = b56p2 assert_equal(len(b56p2.vtx), 6) b56p2 = self.update_block("b56p2", b56p2.vtx[4:6], reorder=False) assert_equal(b56p2.hash, b57p2.hash) self.send_blocks([b56p2], success=False, reject_reason='bad-txns-duplicate', reconnect=True) self.move_tip("57p2") self.send_blocks([b57p2], True) self.move_tip(57) # The tip is not updated because 57p2 seen first self.send_blocks([b57], False) self.save_spendable_output() # Test a few invalid tx types # # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 () # \-> ??? (17) # # tx with prevout.n out of range self.log.info( "Reject a block with a transaction with prevout.n out of range") self.move_tip(57) b58 = self.next_block(58, spend=out[17]) tx = CTransaction() assert(len(out[17].vout) < 42) tx.vin.append( CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) pad_tx(tx) tx.calc_sha256() b58 = self.update_block(58, [tx]) self.send_blocks([b58], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # tx with output value > input value self.log.info( "Reject a block with a transaction with outputs > inputs") self.move_tip(57) b59 = self.next_block(59) tx = self.create_and_sign_transaction(out[17], 51 * COIN) b59 = self.update_block(59, [tx]) self.send_blocks([b59], success=False, reject_reason='bad-txns-in-belowout', reconnect=True) # reset to good chain self.move_tip(57) b60 = self.next_block(60) self.send_blocks([b60], True) self.save_spendable_output() # Test BIP30 (reject duplicate) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 () # \-> b61 () # # Blocks are not allowed to contain a transaction whose id matches that of an earlier, # not-fully-spent transaction in the same chain. To test, make identical coinbases; # the second one should be rejected. See also CVE-2012-1909. 
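Why overriding the scriptSig manufactures a BIP30 duplicate: a coinbase spends no real outpoint, so its txid is determined entirely by its serialized fields; once the scriptSig (normally the height push) and the outputs match, the transactions are byte-identical. A hedged sketch, assuming both heights pay the same subsidy (below the first regtest halving); `height_a`, `height_b` and `coinbase_pubkey` are hypothetical stand-ins:

cb_a = create_coinbase(height_a, coinbase_pubkey)  # different heights...
cb_b = create_coinbase(height_b, coinbase_pubkey)
cb_a.vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG
cb_b.vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG
cb_a.rehash()
cb_b.rehash()
assert cb_a.sha256 == cb_b.sha256  # ...but now the txids collide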
# self.log.info( "Reject a block with a transaction with a duplicate hash of a previous transaction (BIP30)") self.move_tip(60) b61 = self.next_block(61) b61.vtx[0].vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG b61.vtx[0].rehash() b61 = self.update_block(61, []) assert_equal(duplicate_tx.serialize(), b61.vtx[0].serialize()) self.send_blocks([b61], success=False, reject_reason='bad-txns-BIP30', reconnect=True) # Test BIP30 (allow duplicate if spent) # # -> b57 (16) -> b60 () # \-> b_spend_dup_cb (b_dup_cb) -> b_dup_2 () # self.move_tip(57) b_spend_dup_cb = self.next_block('spend_dup_cb') tx = CTransaction() tx.vin.append(CTxIn(COutPoint(duplicate_tx.sha256, 0))) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) self.sign_tx(tx, duplicate_tx) tx.rehash() b_spend_dup_cb = self.update_block('spend_dup_cb', [tx]) b_dup_2 = self.next_block('dup_2') b_dup_2.vtx[0].vin[0].scriptSig = DUPLICATE_COINBASE_SCRIPT_SIG b_dup_2.vtx[0].rehash() b_dup_2 = self.update_block('dup_2', []) assert_equal(duplicate_tx.serialize(), b_dup_2.vtx[0].serialize()) assert_equal( self.nodes[0].gettxout( txid=duplicate_tx.hash, n=0)['confirmations'], 119) self.send_blocks([b_spend_dup_cb, b_dup_2], success=True) # The duplicate has less confirmations assert_equal( self.nodes[0].gettxout( txid=duplicate_tx.hash, n=0)['confirmations'], 1) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b_spend_dup_cb (b_dup_cb) -> b_dup_2 () # \-> b62 (18) # self.log.info( "Reject a block with a transaction with a nonfinal locktime") self.move_tip('dup_2') b62 = self.next_block(62) tx = CTransaction() tx.nLockTime = 0xffffffff # this locktime is non-final # don't set nSequence tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert tx.vin[0].nSequence < 0xffffffff tx.calc_sha256() b62 = self.update_block(62, [tx]) self.send_blocks( [b62], success=False, reject_reason='bad-txns-nonfinal', reconnect=True) # Test a non-final coinbase is also rejected # # -> b_spend_dup_cb (b_dup_cb) -> b_dup_2 () # \-> b63 (-) # self.log.info( "Reject a block with a coinbase transaction with a nonfinal locktime") self.move_tip('dup_2') b63 = self.next_block(63) b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() b63 = self.update_block(63, []) self.send_blocks( [b63], success=False, reject_reason='bad-txns-nonfinal', reconnect=True) # This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > LEGACY_MAX_BLOCK_SIZE with the bloated varint, but <= LEGACY_MAX_BLOCK_SIZE without the bloated varint, # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. # # What matters is that the receiving node should not reject the bloated block, and then reject the canonical # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) 
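The "bloated varint" trick below exploits the fact that CompactSize admits several byte-level encodings of the same number, while consensus code insists on the minimal one. A sketch of the canonical encoder (it matches the shape of ser_compact_size in the test framework's messages module) next to the widened form CBrokenBlock emits:

import struct

# Canonical CompactSize: always the shortest form for the value.
def ser_compact_size(n):
    if n < 253:
        return struct.pack("<B", n)
    elif n <= 0xffff:
        return b'\xfd' + struct.pack("<H", n)
    elif n <= 0xffffffff:
        return b'\xfe' + struct.pack("<I", n)
    return b'\xff' + struct.pack("<Q", n)

# CBrokenBlock instead forces the widest (0xff + uint64) form regardless of
# the value, e.g. for a 3-transaction block:
assert ser_compact_size(3) == b'\x03'           # canonical: 1 byte
bloated = struct.pack("<BQ", 255, 3)            # non-canonical: 9 bytes
assert len(bloated) - len(ser_compact_size(3)) == 8  # the "+ 8" asserted below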
# # -> b_spend_dup_cb (b_dup_cb) -> b_dup_2 () -> b64 (18) # \ # b64a (18) # b64a is a bloated block (non-canonical varint) # b64 is a good block (same as b64 but w/ canonical varint) # self.log.info( "Accept a valid block even if a bloated version of the block has previously been sent") self.move_tip('dup_2') regular_block = self.next_block("64a", spend=out[18]) # make it a "broken_block," with non-canonical serialization b64a = CBrokenBlock(regular_block) b64a.initialize(regular_block) self.blocks["64a"] = b64a self.tip = b64a tx = CTransaction() # use canonical serialization to calculate size script_length = LEGACY_MAX_BLOCK_SIZE - \ len(b64a.normal_serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) b64a = self.update_block("64a", [tx]) assert_equal(len(b64a.serialize()), LEGACY_MAX_BLOCK_SIZE + 8) self.send_blocks([b64a], success=False, reject_reason='non-canonical ReadCompactSize()') # bitcoind doesn't disconnect us for sending a bloated block, but if we subsequently # resend the header message, it won't send us the getdata message again. Just # disconnect and reconnect and then call send_blocks. # TODO: improve this test to be less dependent on P2P DOS behaviour. node.disconnect_p2ps() self.reconnect_p2p() self.move_tip('dup_2') b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) assert_equal(len(b64.serialize()), LEGACY_MAX_BLOCK_SIZE) self.blocks[64] = b64 b64 = self.update_block(64, []) self.send_blocks([b64], True) self.save_spendable_output() # Spend an output created in the block itself # # -> b_dup_2 () -> b64 (18) -> b65 (19) # self.log.info( "Accept a block with a transaction spending an output created in the same block") self.move_tip(64) b65 = self.next_block(65) tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue) tx2 = self.create_and_sign_transaction(tx1, 0) b65 = self.update_block(65, [tx1, tx2]) self.send_blocks([b65], True) self.save_spendable_output() # Attempt to double-spend a transaction created in a block # # -> b64 (18) -> b65 (19) # \-> b67 (20) # # self.log.info( "Reject a block with a transaction double spending a transaction created in the same block") self.move_tip(65) b67 = self.next_block(67) tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue) tx2 = self.create_and_sign_transaction(tx1, 1) tx3 = self.create_and_sign_transaction(tx1, 2) b67 = self.update_block(67, [tx1, tx2, tx3]) self.send_blocks([b67], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # More tests of block subsidy # # -> b64 (18) -> b65 (19) -> b69 (20) # \-> b68 (20) # # b68 - coinbase with an extra 10 satoshis, # creates a tx that has 9 satoshis from out[20] go to fees # this fails because the coinbase is trying to claim 1 satoshi too much in fees # # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee # this succeeds # self.log.info( "Reject a block trying to claim too much subsidy in the coinbase transaction") self.move_tip(65) b68 = self.next_block(68, additional_coinbase_value=10) tx = self.create_and_sign_transaction( out[20], out[20].vout[0].nValue - 9) b68 = self.update_block(68, [tx]) self.send_blocks([b68], success=False, reject_reason='bad-cb-amount', reconnect=True) self.log.info( "Accept a block claiming the correct subsidy in the coinbase transaction") self.move_tip(65) b69 = self.next_block(69, additional_coinbase_value=10) tx = 
self.create_and_sign_transaction( out[20], out[20].vout[0].nValue - 10) self.update_block(69, [tx]) self.send_blocks([b69], True) self.save_spendable_output() # Test spending the outpoint of a non-existent transaction # # -> b65 (19) -> b69 (20) # \-> b70 (21) # self.log.info( "Reject a block containing a transaction spending from a non-existent input") self.move_tip(69) b70 = self.next_block(70, spend=out[21]) bogus_tx = CTransaction() bogus_tx.sha256 = uint256_from_str( b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c") tx = CTransaction() tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) tx.vout.append(CTxOut(1, b"")) pad_tx(tx) b70 = self.update_block(70, [tx]) self.send_blocks([b70], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) # # -> b65 (19) -> b69 (20) -> b72 (21) # \-> b71 (21) # # b72 is a good block. # b71 is a copy of 72, but re-adds one of its transactions. However, # it has the same hash as b72. self.log.info( "Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability") self.move_tip(69) b72 = self.next_block(72) tx1 = self.create_and_sign_transaction(out[21], 2) tx2 = self.create_and_sign_transaction(tx1, 1) b72 = self.update_block(72, [tx1, tx2]) # now tip is 72 b71 = copy.deepcopy(b72) # add duplicate last transaction b71.vtx.append(b72.vtx[-1]) # b71 builds off b69 self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 self.blocks[71] = b71 assert_equal(len(b71.vtx), 4) assert_equal(len(b72.vtx), 3) assert_equal(b72.sha256, b71.sha256) self.move_tip(71) self.send_blocks([b71], success=False, reject_reason='bad-txns-duplicate', reconnect=True) self.move_tip(72) self.send_blocks([b72], True) self.save_spendable_output() self.log.info("Skipped sigops tests") # sigops tests were moved to feature_block_sigops.py, # then deleted from Bitcoin ABC after the May 2020 upgrade b75 = self.next_block(75) self.save_spendable_output() b76 = self.next_block(76) self.save_spendable_output() self.send_blocks([b75, b76], True) # Test transaction resurrection # # -> b77 (24) -> b78 (25) -> b79 (26) # \-> b80 (25) -> b81 (26) -> b82 (27) # # b78 creates a tx, which is spent in b79. After b82, both should be in mempool # # The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the # rather obscure reason that the Python signature code does not distinguish between # Low-S and High-S values (whereas the bitcoin code has custom code which does so); # as a result of which, the odds are 50% that the python code will use the right # value and the transaction will be accepted into the mempool. Until we modify the # test framework to support low-S signing, we are out of luck. # # To get around this issue, we construct transactions which are not signed and which # spend to OP_TRUE. If the standard-ness rules change, this test would need to be # updated. 
(Perhaps to spend to a P2SH OP_TRUE script) self.log.info("Test transaction resurrection during a re-org") self.move_tip(76) b77 = self.next_block(77) tx77 = self.create_and_sign_transaction(out[24], 10 * COIN) b77 = self.update_block(77, [tx77]) self.send_blocks([b77], True) self.save_spendable_output() b78 = self.next_block(78) tx78 = self.create_tx(tx77, 0, 9 * COIN) b78 = self.update_block(78, [tx78]) self.send_blocks([b78], True) b79 = self.next_block(79) tx79 = self.create_tx(tx78, 0, 8 * COIN) b79 = self.update_block(79, [tx79]) self.send_blocks([b79], True) # mempool should be empty assert_equal(len(self.nodes[0].getrawmempool()), 0) self.move_tip(77) b80 = self.next_block(80, spend=out[25]) self.send_blocks([b80], False, force_send=True) self.save_spendable_output() b81 = self.next_block(81, spend=out[26]) # other chain is same length self.send_blocks([b81], False, force_send=True) self.save_spendable_output() b82 = self.next_block(82, spend=out[27]) # now this chain is longer, triggers re-org self.send_blocks([b82], True) self.save_spendable_output() # now check that tx78 and tx79 have been put back into the peer's # mempool mempool = self.nodes[0].getrawmempool() assert_equal(len(mempool), 2) assert tx78.hash in mempool assert tx79.hash in mempool # Test invalid opcodes in dead execution paths. # # -> b81 (26) -> b82 (27) -> b83 (28) # self.log.info( "Accept a block with invalid opcodes in dead execution paths") b83 = self.next_block(83) op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] script = CScript(op_codes) tx1 = self.create_and_sign_transaction( out[28], out[28].vout[0].nValue, script) tx2 = self.create_and_sign_transaction(tx1, 0, CScript([OP_TRUE])) tx2.vin[0].scriptSig = CScript([OP_FALSE]) tx2.rehash() b83 = self.update_block(83, [tx1, tx2]) self.send_blocks([b83], True) self.save_spendable_output() # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) # # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) # \-> b85 (29) -> b86 (30) \-> b89a (32) # self.log.info("Test re-orging blocks with OP_RETURN in them") b84 = self.next_block(84) tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN])) vout_offset = len(tx1.vout) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.calc_sha256() self.sign_tx(tx1, out[29]) tx1.rehash() tx2 = self.create_tx(tx1, vout_offset, 0, CScript([OP_RETURN])) tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx3 = self.create_tx(tx1, vout_offset + 1, 0, CScript([OP_RETURN])) tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx4 = self.create_tx(tx1, vout_offset + 2, 0, CScript([OP_TRUE])) tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx5 = self.create_tx(tx1, vout_offset + 3, 0, CScript([OP_RETURN])) b84 = self.update_block(84, [tx1, tx2, tx3, tx4, tx5]) self.send_blocks([b84], True) self.save_spendable_output() self.move_tip(83) b85 = self.next_block(85, spend=out[29]) self.send_blocks([b85], False) # other chain is same length b86 = self.next_block(86, spend=out[30]) self.send_blocks([b86], True) self.move_tip(84) b87 = self.next_block(87, spend=out[30]) self.send_blocks([b87], False) # other chain is same length self.save_spendable_output() b88 = self.next_block(88, spend=out[31]) self.send_blocks([b88], True) self.save_spendable_output() # trying to spend the OP_RETURN output is rejected b89a = self.next_block("89a", spend=out[32]) tx = 
self.create_tx(tx1, 0, 0, CScript([OP_TRUE])) b89a = self.update_block("89a", [tx]) self.send_blocks([b89a], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True) self.log.info( "Test a re-org of one week's worth of blocks (1088 blocks)") self.move_tip(88) LARGE_REORG_SIZE = 1088 blocks = [] spend = out[32] for i in range(89, LARGE_REORG_SIZE + 89): b = self.next_block(i, spend, version=4) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) b = self.update_block(i, [tx]) assert_equal(len(b.serialize()), LEGACY_MAX_BLOCK_SIZE) blocks.append(b) self.save_spendable_output() spend = self.get_spendable_output() self.send_blocks(blocks, True, timeout=1920) chain1_tip = i # now create alt chain of same length self.move_tip(88) blocks2 = [] for i in range(89, LARGE_REORG_SIZE + 89): blocks2.append(self.next_block("alt" + str(i), version=4)) self.send_blocks(blocks2, False, force_send=True) # extend alt chain to trigger re-org block = self.next_block("alt" + str(chain1_tip + 1), version=4) self.send_blocks([block], True, timeout=1920) # ... and re-org back to the first chain self.move_tip(chain1_tip) block = self.next_block(chain1_tip + 1, version=4) self.send_blocks([block], False, force_send=True) block = self.next_block(chain1_tip + 2, version=4) self.send_blocks([block], True, timeout=1920) self.log.info("Reject a block with an invalid block header version") b_v1 = self.next_block('b_v1', version=1) self.send_blocks( [b_v1], success=False, force_send=True, reject_reason='bad-version(0x00000001)', reconnect=True) self.move_tip(chain1_tip + 2) b_cb34 = self.next_block('b_cb34', version=4) b_cb34.vtx[0].vin[0].scriptSig = b_cb34.vtx[0].vin[0].scriptSig[:-1] b_cb34.vtx[0].rehash() b_cb34.hashMerkleRoot = b_cb34.calc_merkle_root() b_cb34.solve() self.send_blocks( [b_cb34], success=False, reject_reason='bad-cb-height', reconnect=True) # Helper methods ################ def add_transactions_to_block(self, block, tx_list): [tx.rehash() for tx in tx_list] block.vtx.extend(tx_list) # this is a little handier to use than the version in blocktools.py def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): return create_tx_with_script( spend_tx, n, amount=value, script_pub_key=script) # sign a transaction, using the key we know about # this signs input 0 in tx, which is assumed to be spending output n in # spend_tx def sign_tx(self, tx, spend_tx): scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend tx.vin[0].scriptSig = CScript() return sighash = SignatureHashForkId( spend_tx.vout[0].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[0].nValue) tx.vin[0].scriptSig = CScript( [self.coinbase_key.sign_ecdsa(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) def create_and_sign_transaction( self, spend_tx, value, script=CScript([OP_TRUE])): tx = self.create_tx(spend_tx, 0, value, script) self.sign_tx(tx, spend_tx) tx.rehash() return tx def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), *, version=1): if self.tip is None: base_block_hash = self.genesis_hash block_time = int(time.time()) + 1 else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height, 
self.coinbase_pubkey) coinbase.vout[0].nValue += additional_coinbase_value coinbase.rehash() if spend is None: block = create_block( base_block_hash, coinbase, block_time, version=version) else: # all but one satoshi to fees coinbase.vout[0].nValue += spend.vout[0].nValue - 1 coinbase.rehash() block = create_block( base_block_hash, coinbase, block_time, version=version) # spend 1 satoshi tx = self.create_tx(spend, 0, 1, script) self.sign_tx(tx, spend) self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() # Block is created. Find a valid nonce. block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block # save the current tip so it can be spent by a later block def save_spendable_output(self): self.log.debug("saving spendable output {}".format(self.tip.vtx[0])) self.spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(self): self.log.debug("getting spendable output {}".format( self.spendable_outputs[0].vtx[0])) return self.spendable_outputs.pop(0).vtx[0] # move the tip back to a previous block def move_tip(self, number): self.tip = self.blocks[number] # adds transactions to the block and updates state def update_block(self, block_number, new_transactions, reorder=True): block = self.blocks[block_number] self.add_transactions_to_block(block, new_transactions) old_sha256 = block.sha256 if reorder: make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: self.block_heights[block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block def bootstrap_p2p(self, timeout=10): """Add a P2P connection to the node. Helper to connect and wait for version handshake.""" self.nodes[0].add_p2p_connection(P2PDataStore()) # We need to wait for the initial getheaders from the peer before we # start populating our blockstore. If we don't, then we may run ahead # to the next subtest before we receive the getheaders. We'd then send # an INV for the next block and receive two getheaders - one for the # IBD and one for the INV. We'd respond to both and could get # unexpectedly disconnected if the DoS score for that error is 50. self.nodes[0].p2p.wait_for_getheaders(timeout=timeout) def reconnect_p2p(self, timeout=60): """Tear down and bootstrap the P2P connection to the node. The node gets disconnected several times in this test. This helper method reconnects the p2p and restarts the network thread.""" self.nodes[0].disconnect_p2ps() self.bootstrap_p2p(timeout=timeout) def send_blocks(self, blocks, success=True, reject_reason=None, force_send=False, reconnect=False, timeout=60): """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. 
Call with success = False if the tip shouldn't advance to the most recent block.""" self.nodes[0].p2p.send_blocks_and_test(blocks, self.nodes[0], success=success, reject_reason=reject_reason, force_send=force_send, timeout=timeout, expect_disconnect=reconnect) if reconnect: self.reconnect_p2p(timeout=timeout) if __name__ == '__main__': FullBlockTest().main() diff --git a/test/functional/feature_blocksdir.py b/test/functional/feature_blocksdir.py index 1f58b57dd..d38ed7e9f 100755 --- a/test/functional/feature_blocksdir.py +++ b/test/functional/feature_blocksdir.py @@ -1,43 +1,46 @@ #!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the blocksdir option. """ import os import shutil -from test_framework.test_framework import BitcoinTestFramework, initialize_datadir +from test_framework.test_framework import ( + BitcoinTestFramework, + initialize_datadir, +) class BlocksdirTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): self.stop_node(0) assert os.path.isdir(os.path.join( self.nodes[0].datadir, self.chain, "blocks")) assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "blocks")) shutil.rmtree(self.nodes[0].datadir) initialize_datadir(self.options.tmpdir, 0, self.chain) self.log.info("Starting with nonexistent blocksdir ...") blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir') self.nodes[0].assert_start_raises_init_error( ["-blocksdir=" + blocksdir_path], 'Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path)) os.mkdir(blocksdir_path) self.log.info("Starting with existing blocksdir ...") self.start_node(0, ["-blocksdir=" + blocksdir_path]) self.log.info("mining blocks..") self.nodes[0].generatetoaddress( 10, self.nodes[0].get_deterministic_priv_key().address) assert os.path.isfile(os.path.join( blocksdir_path, self.chain, "blocks", "blk00000.dat")) assert os.path.isdir(os.path.join( self.nodes[0].datadir, self.chain, "blocks", "index")) if __name__ == '__main__': BlocksdirTest().main() diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py index 92f4e5128..7f449df7e 100755 --- a/test/functional/feature_cltv.py +++ b/test/functional/feature_cltv.py @@ -1,218 +1,221 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test BIP65 (CHECKLOCKTIMEVERIFY). Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height 1351. 
""" -from test_framework.blocktools import create_block, create_coinbase, create_transaction, make_conform_to_ctor +from test_framework.blocktools import ( + create_block, + create_coinbase, + create_transaction, + make_conform_to_ctor, +) from test_framework.messages import ( CTransaction, FromHex, + ToHex, msg_block, msg_tx, - ToHex, -) -from test_framework.p2p import ( - P2PInterface, ) +from test_framework.p2p import P2PInterface from test_framework.script import ( - CScript, - CScriptNum, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE, + CScript, + CScriptNum, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal CLTV_HEIGHT = 1351 def cltv_lock_to_height(node, tx, to_address, amount, height=-1): '''Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make a transaction that spends it. This transforms the output script to anyone can spend (OP_TRUE) if the lock time condition is valid. Default height is -1 which leads CLTV to fail TODO: test more ways that transactions using CLTV could be invalid (eg locktime requirements fail, sequence time requirements fail, etc). ''' height_op = OP_1NEGATE if(height > 0): tx.vin[0].nSequence = 0 tx.nLockTime = height height_op = CScriptNum(height) tx.vout[0].scriptPubKey = CScript( [height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE]) pad_tx(tx) fundtx_raw = node.signrawtransactionwithwallet(ToHex(tx))['hex'] fundtx = FromHex(CTransaction(), fundtx_raw) fundtx.rehash() # make spending tx inputs = [{ "txid": fundtx.hash, "vout": 0 }] output = {to_address: amount} spendtx_raw = node.createrawtransaction(inputs, output) spendtx = FromHex(CTransaction(), spendtx_raw) pad_tx(spendtx) return fundtx, spendtx class BIP65Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [[ '-whitelist=noban@127.0.0.1', '-par=1', # Use only one script thread to get the exact reject reason for testing '-acceptnonstdtxn=1', # cltv_invalidate is nonstandard ]] self.setup_clean_chain = True self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.nodes[0].add_p2p_connection(P2PInterface()) self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2)) self.coinbase_txids = [self.nodes[0].getblock( b)['tx'][0] for b in self.nodes[0].generate(CLTV_HEIGHT - 2)] self.nodeaddress = self.nodes[0].getnewaddress() self.log.info( "Test that an invalid-according-to-CLTV transaction can still appear in a block") fundtx = create_transaction(self.nodes[0], self.coinbase_txids[0], self.nodeaddress, amount=49990000) fundtx, spendtx = cltv_lock_to_height( self.nodes[0], fundtx, self.nodeaddress, 49980000) tip = self.nodes[0].getbestblockhash() block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 block = create_block(int(tip, 16), create_coinbase( CLTV_HEIGHT - 1), block_time) block.nVersion = 3 block.vtx.append(fundtx) # include the -1 CLTV in block block.vtx.append(spendtx) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is valid assert_equal(self.nodes[0].getbestblockhash(), block.hash) self.log.info("Test that blocks must now be at least version 4") tip = block.sha256 block_time += 1 block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) block.nVersion = 3 block.solve() with self.nodes[0].assert_debug_log(expected_msgs=['{}, 
bad-version(0x00000003)'.format(block.hash)]): self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) self.nodes[0].p2p.sync_with_ping() self.log.info( "Test that invalid-according-to-cltv transactions cannot appear in a block") block.nVersion = 4 fundtx = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=49990000) fundtx, spendtx = cltv_lock_to_height( self.nodes[0], fundtx, self.nodeaddress, 49980000) # The funding tx only has unexecuted bad CLTV, in scriptpubkey; this is # valid. self.nodes[0].p2p.send_and_ping(msg_tx(fundtx)) assert fundtx.hash in self.nodes[0].getrawmempool() # Mine a block containing the funding transaction block.vtx.append(fundtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is valid assert_equal(self.nodes[0].getbestblockhash(), block.hash) # We show that this tx is invalid due to CLTV by getting it # rejected from the mempool for exactly that reason. assert_equal( [{'txid': spendtx.hash, 'allowed': False, 'reject-reason': 'non-mandatory-script-verify-flag (Negative locktime)'}], self.nodes[0].testmempoolaccept( rawtxs=[spendtx.serialize().hex()], maxfeerate=0) ) rejectedtx_signed = self.nodes[0].signrawtransactionwithwallet( ToHex(spendtx)) # Couldn't complete signature due to CLTV assert rejectedtx_signed['errors'][0]['error'] == 'Negative locktime' tip = block.hash block_time += 1 block = create_block( block.sha256, create_coinbase(CLTV_HEIGHT + 1), block_time) block.nVersion = 4 block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() with self.nodes[0].assert_debug_log(expected_msgs=['ConnectBlock {} failed, blk-bad-inputs'.format(block.hash)]): self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) self.nodes[0].p2p.sync_with_ping() self.log.info( "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted") fundtx = create_transaction(self.nodes[0], self.coinbase_txids[2], self.nodeaddress, amount=49990000) fundtx, spendtx = cltv_lock_to_height( self.nodes[0], fundtx, self.nodeaddress, 49980000, CLTV_HEIGHT) # make sure sequence is nonfinal and locktime is good spendtx.vin[0].nSequence = 0xfffffffe spendtx.nLockTime = CLTV_HEIGHT # both transactions are fully valid self.nodes[0].sendrawtransaction(ToHex(fundtx)) self.nodes[0].sendrawtransaction(ToHex(spendtx)) # Modify the transactions in the block to be valid against CLTV block.vtx.pop(1) block.vtx.append(fundtx) block.vtx.append(spendtx) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is now valid assert_equal(self.nodes[0].getbestblockhash(), block.hash) if __name__ == '__main__': BIP65Test().main() diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py index 8ce745cbd..655d6457f 100755 --- a/test/functional/feature_csv_activation.py +++ b/test/functional/feature_csv_activation.py @@ -1,677 +1,677 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test activation of the first version bits soft fork. 
This soft fork will activate the following BIPS: BIP 68 - nSequence relative lock times BIP 112 - CHECKSEQUENCEVERIFY BIP 113 - MedianTimePast semantics for nLockTime regtest lock-in with 108/144 block signalling activation after a further 144 blocks mine 82 blocks whose coinbases will be used to generate inputs for our tests mine 489 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572 mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered mine 1 block and test that enforcement has triggered (which triggers ACTIVE) Test BIP 113 is enforced Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height Mine 1 block so next height is 581 and test BIP 68 now passes time but not height Mine 1 block so next height is 582 and test BIP 68 now passes time and height Test that BIP 112 is enforced Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates, and that after the soft fork activates transactions pass and fail as they should according to the rules. For each BIP, transactions of versions 1 and 2 will be tested. ---------------- BIP 113: bip113tx - modify the nLockTime variable BIP 68: bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below BIP 112: bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112tx_special - test negative argument to OP_CSV """ +import time from decimal import Decimal from itertools import product -import time from test_framework.blocktools import ( create_block, create_coinbase, make_conform_to_ctor, ) from test_framework.messages import XEC, CTransaction, FromHex, ToHex from test_framework.p2p import P2PDataStore from test_framework.script import ( - CScript, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE, + CScript, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.txtools import pad_tx from test_framework.util import assert_equal BASE_RELATIVE_LOCKTIME = 10 SEQ_DISABLE_FLAG = 1 << 31 SEQ_RANDOM_HIGH_BIT = 1 << 25 SEQ_TYPE_FLAG = 1 << 22 SEQ_RANDOM_LOW_BIT = 1 << 18 def relative_locktime(sdf, srhb, stf, srlb): """Returns a locktime with certain bits set.""" locktime = BASE_RELATIVE_LOCKTIME if sdf: locktime |= SEQ_DISABLE_FLAG if srhb: locktime |= SEQ_RANDOM_HIGH_BIT if stf: locktime |= SEQ_TYPE_FLAG if srlb: locktime |= SEQ_RANDOM_LOW_BIT return locktime def all_rlt_txs(txs): return [tx['tx'] for tx in txs] def get_csv_status(node): height = node.getblockchaininfo()['blocks'] return height >= 576 def create_transaction(node, txid, to_address, *, amount): inputs = [{"txid": txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) tx = FromHex(CTransaction(), rawtx) return tx def sign_transaction(node, unsignedtx): rawtx = ToHex(unsignedtx) signresult = node.signrawtransactionwithwallet(rawtx) tx = FromHex(CTransaction(), signresult['hex']) return tx def spend_tx(node, prev_tx, address): spendtx = create_transaction( node, prev_tx.hash, address, amount=(prev_tx.vout[0].nValue - 1000) / XEC)
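# Editor's aside on the sequence-flag constants defined above: BIP68 packs
# a relative locktime into the low 16 bits of nSequence. For example,
# relative_locktime(sdf=False, srhb=False, stf=True, srlb=False) returns
# (1 << 22) | 10 = 0x0040000a: bit 22 (SEQ_TYPE_FLAG) switches the unit
# from blocks to 512-second intervals, and setting bit 31
# (SEQ_DISABLE_FLAG) would disable the lock entirely. The two "random"
# bits are ignored by consensus and exist only to prove masking works.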
spendtx.nVersion = prev_tx.nVersion pad_tx(spendtx) spendtx.rehash() return spendtx def create_bip112special(node, input, txversion, address): tx = create_transaction( node, input, address, amount=Decimal("49980000")) tx.nVersion = txversion tx.vout[0].scriptPubKey = CScript( [-1, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) tx.rehash() signtx = sign_transaction(node, tx) signtx.rehash() return signtx def send_generic_input_tx(node, coinbases, address): return node.sendrawtransaction(ToHex(sign_transaction(node, create_transaction( node, node.getblock(coinbases.pop())['tx'][0], address, amount=Decimal("49990000"))))) def create_bip68txs(node, bip68inputs, txversion, address, locktime_delta=0): """Returns a list of bip68 transactions with different bits set.""" txs = [] assert len(bip68inputs) >= 16 for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)): locktime = relative_locktime(sdf, srhb, stf, srlb) tx = create_transaction( node, bip68inputs[i], address, amount=Decimal("49980000")) tx.nVersion = txversion tx.vin[0].nSequence = locktime + locktime_delta tx = sign_transaction(node, tx) tx.rehash() txs.append({'tx': tx, 'sdf': sdf, 'stf': stf}) return txs def create_bip112txs(node, bip112inputs, varyOP_CSV, txversion, address, locktime_delta=0): """Returns a list of bip112 transactions with different bits set.""" txs = [] assert len(bip112inputs) >= 16 for i, (sdf, srhb, stf, srlb) in enumerate(product(*[[True, False]] * 4)): locktime = relative_locktime(sdf, srhb, stf, srlb) tx = create_transaction( node, bip112inputs[i], address, amount=Decimal("49980000")) if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME + locktime_delta else: # vary nSequence instead, OP_CSV is fixed tx.vin[0].nSequence = locktime + locktime_delta tx.nVersion = txversion if (varyOP_CSV): tx.vout[0].scriptPubKey = CScript( [locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) else: tx.vout[0].scriptPubKey = CScript( [BASE_RELATIVE_LOCKTIME, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) tx.rehash() signtx = sign_transaction(node, tx) signtx.rehash() txs.append({'tx': signtx, 'sdf': sdf, 'stf': stf}) return txs class BIP68_112_113Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [['-whitelist=noban@127.0.0.1', '-blockversion=4']] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def generate_blocks(self, number): test_blocks = [] for _ in range(number): block = self.create_test_block([]) test_blocks.append(block) self.last_block_time += 600 self.tip = block.sha256 self.tipheight += 1 return test_blocks def create_test_block(self, txs, version=536870912): block = create_block(self.tip, create_coinbase( self.tipheight + 1), self.last_block_time + 600) block.nVersion = version block.vtx.extend(txs) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() return block # Create a block with given txs, and spend these txs in the same block. # Spending utxos in the same block is OK as long as nSequence is not enforced. # Otherwise a number of intermediate blocks should be generated, and this # method should not be used. 
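# Editor's note: a typical call in the tests below looks like
#     block = self.create_test_block_spend_utxos(node, [csv_tx])
#     self.send_blocks([block], success=False)
# (csv_tx standing in for any of the BIP112 transactions built later);
# each tx and its child spend land in the same block, which is exactly
# the pattern an active relative-locktime rule must reject.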
def create_test_block_spend_utxos(self, node, txs, version=536870912): block = self.create_test_block(txs, version) block.vtx.extend([spend_tx(node, tx, self.nodeaddress) for tx in txs]) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() return block def send_blocks(self, blocks, success=True): """Sends blocks to test node. Syncs and verifies that tip has advanced to most recent block. Call with success = False if the tip shouldn't advance to the most recent block.""" self.nodes[0].p2p.send_blocks_and_test( blocks, self.nodes[0], success=success) def run_test(self): self.nodes[0].add_p2p_connection(P2PDataStore()) self.log.info("Generate blocks in the past for coinbase outputs.") # Enough to build up to 1000 blocks 10 minutes apart without worrying # about getting into the future long_past_time = int(time.time()) - 600 * 1000 # Enough so that the generated blocks will still all be before # long_past_time self.nodes[0].setmocktime(long_past_time - 100) # 82 blocks generated for inputs self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # Set time back to present so yielded blocks aren't in the future as # we advance last_block_time self.nodes[0].setmocktime(0) # height of the next block to build self.tipheight = 82 self.last_block_time = long_past_time self.tip = int(self.nodes[0].getbestblockhash(), 16) self.nodeaddress = self.nodes[0].getnewaddress() # CSV is not activated yet. assert_equal(get_csv_status(self.nodes[0]), False) # Generate 489 more version 4 blocks test_blocks = self.generate_blocks(489) # Test #1 self.send_blocks(test_blocks) # Still not activated. assert_equal(get_csv_status(self.nodes[0]), False) # Inputs at height = 572 # # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) # Note we reuse inputs for v1 and v2 txs so must test these separately # 16 normal inputs bip68inputs = [] for _ in range(16): bip68inputs.append(send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress)) # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be # prepended to spending scriptSig) bip112basicinputs = [] for _ in range(2): inputs = [] for _ in range(16): inputs.append(send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112basicinputs.append(inputs) # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP # (actually will be prepended to spending scriptSig) bip112diverseinputs = [] for _ in range(2): inputs = [] for _ in range(16): inputs.append(send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112diverseinputs.append(inputs) # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to # spending scriptSig) bip112specialinput = send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress) # 1 normal input bip113input = send_generic_input_tx( self.nodes[0], self.coinbase_blocks, self.nodeaddress) self.nodes[0].setmocktime(self.last_block_time + 600) # 1 block generated for inputs to be in chain at height 572 inputblockhash = self.nodes[0].generate(1)[0] self.nodes[0].setmocktime(0) self.tip = int(inputblockhash, 16) self.tipheight += 1 self.last_block_time += 600 assert_equal(len(self.nodes[0].getblock( inputblockhash, True)["tx"]), 82 + 1) # 2 more version 4 blocks test_blocks = self.generate_blocks(2) # Test #2 self.send_blocks(test_blocks) self.log.info( "Not yet activated, height = 574 (will activate for block 576, not 575)") 
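# Editor's aside on the height bookkeeping above: 82 coinbase blocks
# + 489 generated blocks + 1 block carrying the test inputs + 2 more
# blocks puts the tip at height 574; get_csv_status() reports activation
# once the chain reaches height 576, so two more blocks are needed before
# the new rules are enforced.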
assert_equal(get_csv_status(self.nodes[0]), False) # Test both version 1 and version 2 transactions for all tests # BIP113 test transaction will be modified before each use to # put in appropriate block time bip113tx_v1 = create_transaction( self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49980000")) bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE bip113tx_v1.nVersion = 1 bip113tx_v2 = create_transaction( self.nodes[0], bip113input, self.nodeaddress, amount=Decimal("49980000")) bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE bip113tx_v2.nVersion = 2 # For BIP68 test all 16 relative sequence locktimes bip68txs_v1 = create_bip68txs( self.nodes[0], bip68inputs, 1, self.nodeaddress) bip68txs_v2 = create_bip68txs( self.nodes[0], bip68inputs, 2, self.nodeaddress) # For BIP112 test: # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs bip112txs_vary_nSequence_v1 = create_bip112txs( self.nodes[0], bip112basicinputs[0], False, 1, self.nodeaddress) bip112txs_vary_nSequence_v2 = create_bip112txs( self.nodes[0], bip112basicinputs[0], False, 2, self.nodeaddress) # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs bip112txs_vary_nSequence_9_v1 = create_bip112txs( self.nodes[0], bip112basicinputs[1], False, 1, self.nodeaddress, -1) bip112txs_vary_nSequence_9_v2 = create_bip112txs( self.nodes[0], bip112basicinputs[1], False, 2, self.nodeaddress, -1) # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV # OP_DROP inputs bip112txs_vary_OP_CSV_v1 = create_bip112txs( self.nodes[0], bip112diverseinputs[0], True, 1, self.nodeaddress) bip112txs_vary_OP_CSV_v2 = create_bip112txs( self.nodes[0], bip112diverseinputs[0], True, 2, self.nodeaddress) # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV # OP_DROP inputs bip112txs_vary_OP_CSV_9_v1 = create_bip112txs( self.nodes[0], bip112diverseinputs[1], True, 1, self.nodeaddress, -1) bip112txs_vary_OP_CSV_9_v2 = create_bip112txs( self.nodes[0], bip112diverseinputs[1], True, 2, self.nodeaddress, -1) # -1 OP_CSV OP_DROP input bip112tx_special_v1 = create_bip112special( self.nodes[0], bip112specialinput, 1, self.nodeaddress) bip112tx_special_v2 = create_bip112special( self.nodes[0], bip112specialinput, 2, self.nodeaddress) self.log.info("TESTING") self.log.info("Pre-Soft Fork Tests. 
All txs should pass.") self.log.info("Test version 1 txs") success_txs = [] # add BIP113 tx and -1 CSV tx # = MTP of prior block (not <) but < time put on current block bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) success_txs.append(bip113signed1) success_txs.append(bip112tx_special_v1) success_txs.append( spend_tx(self.nodes[0], bip112tx_special_v1, self.nodeaddress)) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v1)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_nSequence_v1)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v1)]) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v1)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)]) # Test #3 self.send_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("Test version 2 txs") success_txs = [] # add BIP113 tx and -1 CSV tx # = MTP of prior block (not <) but < time put on current block bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) success_txs.append(bip113signed2) success_txs.append(bip112tx_special_v2) success_txs.append( spend_tx(self.nodes[0], bip112tx_special_v2, self.nodeaddress)) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v2)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_nSequence_v2)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v2)]) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v2)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) success_txs.extend([spend_tx(self.nodes[0], tx, self.nodeaddress) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)]) # Test #4 self.send_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # 1 more version 4 block to get us to height 575 so the fork should # now be active for the next block test_blocks = self.generate_blocks(1) # Test #5 self.send_blocks(test_blocks) assert_equal(get_csv_status(self.nodes[0]), False) self.nodes[0].generate(1) assert_equal(get_csv_status(self.nodes[0]), True) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("Post-Soft Fork Tests.") self.log.info("BIP 113 tests") # BIP 113 tests should now fail regardless of version number # if nLockTime isn't satisfied by new rules # = MTP of prior block (not <) but < time put on current block bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) # = MTP of prior block (not <) 
but < time put on current block bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: # Test #6, Test #7 self.send_blocks( [self.create_test_block([bip113tx])], success=False) # BIP 113 tests should now pass if the locktime is < MTP # < MTP of prior block bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 bip113signed1 = sign_transaction(self.nodes[0], bip113tx_v1) # < MTP of prior block bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 bip113signed2 = sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: # Test #8, Test #9 self.send_blocks([self.create_test_block([bip113tx])]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Next block height = 580 after 4 blocks of random version test_blocks = self.generate_blocks(4) # Test #10 self.send_blocks(test_blocks) self.log.info("BIP 68 tests") self.log.info("Test version 1 txs - all should still pass") success_txs = [] success_txs.extend(all_rlt_txs(bip68txs_v1)) # Test #11 self.send_blocks([self.create_test_block(success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("Test version 2 txs") # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass bip68success_txs = [tx['tx'] for tx in bip68txs_v2 if tx['sdf']] self.send_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # All txs without flag fail as we are at delta height = 8 < 10 and # delta time = 8 * 600 < 10 * 512 bip68timetxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and tx['stf']] for tx in bip68timetxs: # Test #13 - Test #16 self.send_blocks([self.create_test_block([tx])], success=False) bip68heighttxs = [tx['tx'] for tx in bip68txs_v2 if not tx['sdf'] and not tx['stf']] for tx in bip68heighttxs: # Test #17 - Test #20 self.send_blocks([self.create_test_block([tx])], success=False) # Advance one block to 581 test_blocks = self.generate_blocks(1) # Test #21 self.send_blocks(test_blocks,) # Height txs should fail and time txs should now pass 9 * 600 > 10 * # 512 bip68success_txs.extend(bip68timetxs) # Test #22 self.send_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) for tx in bip68heighttxs: # Test #23 - Test #26 self.send_blocks([self.create_test_block([tx])], success=False) # Advance one block to 582 test_blocks = self.generate_blocks(1) # Test #27 self.send_blocks(test_blocks) # All BIP 68 txs should pass bip68success_txs.extend(bip68heighttxs) # Test #28 self.send_blocks([self.create_test_block(bip68success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) self.log.info("BIP 112 tests") self.log.info("Test version 1 txs") # -1 OP_CSV tx should fail # Test #29 self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [bip112tx_special_v1])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, # version 1 txs should still pass success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if tx['sdf']] success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if tx['sdf']] # Test #30 self.send_blocks( [self.create_test_block_spend_utxos(self.nodes[0], success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, # version 1 txs should now fail fail_txs = 
all_rlt_txs(bip112txs_vary_nSequence_v1) fail_txs += all_rlt_txs(bip112txs_vary_nSequence_9_v1) fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v1 if not tx['sdf']] fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v1 if not tx['sdf']] for tx in fail_txs: # Test #31 - Test #78 self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [tx])], success=False) self.log.info("Test version 2 txs") # -1 OP_CSV tx should fail # Test #79 self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [bip112tx_special_v2])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, # version 2 txs should pass success_txs = [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if tx['sdf']] success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if tx['sdf']] # Test #80 self.send_blocks( [self.create_test_block_spend_utxos(self.nodes[0], success_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all # remaining txs # All txs with nSequence 9 should fail either due to earlier mismatch # or failing the CSV check fail_txs = all_rlt_txs(bip112txs_vary_nSequence_9_v2) fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_9_v2 if not tx['sdf']] for tx in fail_txs: # Test #81 - Test #104 self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [tx])], success=False) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if tx['sdf']] for tx in fail_txs: # Test #105 - Test #112 self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [tx])], success=False) # If sequencelock types mismatch, tx should fail fail_txs = [tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and tx['stf']] fail_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']] for tx in fail_txs: # Test #113 - Test #120 self.send_blocks([self.create_test_block_spend_utxos( self.nodes[0], [tx])], success=False) # Remaining txs should pass; this just tests that masking works properly success_txs = [ tx['tx'] for tx in bip112txs_vary_nSequence_v2 if not tx['sdf'] and not tx['stf']] success_txs += [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and not tx['stf']] # Test #121 self.send_blocks([self.create_test_block(success_txs)]) # Spending the previous block utxos requires a difference of 10 blocks (nSequence = 10). # Generate 9 blocks then spend in the 10th block = self.nodes[0].getbestblockhash() self.last_block_time += 600 self.tip = int("0x" + block, 0) self.tipheight += 1 # Test #122 self.send_blocks(self.generate_blocks(9)) spend_txs = [] for tx in success_txs: raw_tx = spend_tx(self.nodes[0], tx, self.nodeaddress) raw_tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME raw_tx.rehash() spend_txs.append(raw_tx) # Test #123 self.send_blocks([self.create_test_block(spend_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Additional test: check that comparison of two time types works # properly time_txs = [] for tx in [tx['tx'] for tx in bip112txs_vary_OP_CSV_v2 if not tx['sdf'] and tx['stf']]: signtx = sign_transaction(self.nodes[0], tx) time_txs.append(signtx) # Test #124 self.send_blocks([self.create_test_block(time_txs)]) # Spending the previous block utxos requires a block time difference of # at least 10 * 512s (nSequence = 10).
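# Editor's aside: each block from generate_blocks() advances last_block_time
# by 600 seconds, while a type-flagged nSequence of 10 demands
# 10 * 512 = 5120 seconds; 8 * 600 = 4800 still falls short, but
# 9 * 600 = 5400 suffices -- hence "generate 8 blocks then spend in the 9th"
# below.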
# Generate 8 blocks then spend in the 9th (9 * 600 > 10 * 512) block = self.nodes[0].getbestblockhash() self.last_block_time += 600 self.tip = int("0x" + block, 0) self.tipheight += 1 # Test #125 self.send_blocks(self.generate_blocks(8)) spend_txs = [] for tx in time_txs: raw_tx = spend_tx(self.nodes[0], tx, self.nodeaddress) raw_tx.vin[0].nSequence = BASE_RELATIVE_LOCKTIME | SEQ_TYPE_FLAG raw_tx.rehash() spend_txs.append(raw_tx) # Test #126 self.send_blocks([self.create_test_block(spend_txs)]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # TODO: Test empty stack fails if __name__ == '__main__': BIP68_112_113Test().main() diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index 96b202059..3fcc583bb 100755 --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -1,331 +1,328 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test recovery from a crash during chainstate writing. - 4 nodes * node0, node1, and node2 will have different dbcrash ratios, and different dbcache sizes * node3 will be a regular node, with no crashing. * The nodes will not connect to each other. - use default test framework starting chain. initialize starting_tip_height to tip height. - Main loop: * generate lots of transactions on node3, enough to fill up a block. * uniformly randomly pick a tip height from starting_tip_height to tip_height; with probability 1/(height_difference+4), invalidate this block. * mine enough blocks to overtake tip_height at start of loop. * for each node in [node0,node1,node2]: - for each mined block: * submit block to node * if node crashed on/after submitting: - restart until recovery succeeds - check that utxo matches node3 using gettxoutsetinfo""" import errno import http.client import random import time from test_framework.blocktools import create_confirmed_utxos from test_framework.cdefs import DEFAULT_MAX_BLOCK_SIZE from test_framework.messages import ( XEC, COutPoint, CTransaction, CTxIn, CTxOut, ToHex, ) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - hex_str_to_bytes, -) +from test_framework.util import assert_equal, hex_str_to_bytes class ChainstateWriteCrashTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = False self.rpc_timeout = 480 self.supports_cli = False # Set -maxmempool=0 to turn off mempool memory sharing with dbcache # Set -rpcservertimeout=900 to reduce socket disconnects in this # long-running test self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000", "-noparkdeepreorg"] # Set different crash ratios and cache sizes. Note that not all of # -dbcache goes to the in-memory coins cache. self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args # Node3 is a normal node with default args, except will mine full blocks # and non-standard txs (e.g. 
txs with "dust" outputs) self.node3_args = [ "-blockmaxsize={}".format(DEFAULT_MAX_BLOCK_SIZE), "-acceptnonstdtxn"] self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.add_nodes(self.num_nodes, extra_args=self.extra_args) self.start_nodes() self.import_deterministic_coinbase_privkeys() # Leave them unconnected, we'll use submitblock directly in this test def restart_node(self, node_index, expected_tip): """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash. Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up after 60 seconds. Returns the utxo hash of the given node.""" time_start = time.time() while time.time() - time_start < 120: try: # Any of these RPC calls could throw due to node crash self.start_node(node_index) self.nodes[node_index].waitforblock(expected_tip) utxo_hash = self.nodes[node_index].gettxoutsetinfo()[ 'hash_serialized'] return utxo_hash except Exception: # An exception here should mean the node is about to crash. # If bitcoind exits, then try again. wait_for_node_exit() # should raise an exception if bitcoind doesn't exit. self.wait_for_node_exit(node_index, timeout=15) self.crashed_on_restart += 1 time.sleep(1) # If we got here, bitcoind isn't coming back up on restart. Could be a # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio -- # perhaps we generated a test case that blew up our cache? # TODO: If this happens a lot, we should try to restart without -dbcrashratio # and make sure that recovery happens. raise AssertionError( "Unable to successfully restart node {} in allotted time".format(node_index)) def submit_block_catch_error(self, node_index, block): """Try submitting a block to the given node. Catch any exceptions that indicate the node has crashed. Returns true if the block was submitted successfully; false otherwise.""" try: self.nodes[node_index].submitblock(block) return True except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e: self.log.debug( "node {} submitblock raised exception: {}".format(node_index, e)) return False except OSError as e: self.log.debug( "node {} submitblock raised OSError exception: errno={}".format(node_index, e.errno)) if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]: # The node has likely crashed return False else: # Unexpected exception, raise raise def sync_node3blocks(self, block_hashes): """Use submitblock to sync node3's chain with the other nodes If submitblock fails, restart the node and get the new utxo hash. If any nodes crash while updating, we'll compare utxo hashes to ensure recovery was successful.""" node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized'] # Retrieve all the blocks from node3 blocks = [] for block_hash in block_hashes: blocks.append( [block_hash, self.nodes[3].getblock(block_hash, False)]) # Deliver each block to each other node for i in range(3): nodei_utxo_hash = None self.log.debug("Syncing blocks to node {}".format(i)) for (block_hash, block) in blocks: # Get the block from node3, and submit to node_i self.log.debug("submitting block {}".format(block_hash)) if not self.submit_block_catch_error(i, block): # TODO: more carefully check that the crash is due to -dbcrashratio # (change the exit code perhaps, and check that here?) 
self.wait_for_node_exit(i, timeout=30) self.log.debug( "Restarting node {} after block hash {}".format(i, block_hash)) nodei_utxo_hash = self.restart_node(i, block_hash) assert nodei_utxo_hash is not None self.restart_counts[i] += 1 else: # Clear it out after successful submitblock calls -- the cached # utxo hash will no longer be correct nodei_utxo_hash = None # Check that the utxo hash matches node3's utxo set # NOTE: we only check the utxo set if we had to restart the node # after the last block submitted: # - checking the utxo hash causes a cache flush, which we don't # want to do every time; so # - we only update the utxo cache after a node restart, since flushing # the cache is a no-op at that point if nodei_utxo_hash is not None: self.log.debug( "Checking txoutsetinfo matches for node {}".format(i)) assert_equal(nodei_utxo_hash, node3_utxo_hash) def verify_utxo_hash(self): """Verify that the utxo hash of each node matches node3. Restart any nodes that crash while querying.""" node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized'] self.log.info("Verifying utxo hash matches for all nodes") for i in range(3): try: nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()[ 'hash_serialized'] except OSError: # probably a crash on db flushing nodei_utxo_hash = self.restart_node( i, self.nodes[3].getbestblockhash()) assert_equal(nodei_utxo_hash, node3_utxo_hash) def generate_small_transactions(self, node, count, utxo_list): FEE = 1000 # TODO: replace this with node relay fee based calculation num_transactions = 0 random.shuffle(utxo_list) while len(utxo_list) >= 2 and num_transactions < count: tx = CTransaction() input_amount = 0 for _ in range(2): utxo = utxo_list.pop() tx.vin.append( CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))) input_amount += int(utxo['amount'] * XEC) output_amount = (input_amount - FEE) // 3 if output_amount <= 0: # Sanity check -- if we chose inputs that are too small, skip continue for _ in range(3): tx.vout.append( CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) # Sign and send the transaction to get into the mempool tx_signed_hex = node.signrawtransactionwithwallet(ToHex(tx))['hex'] node.sendrawtransaction(tx_signed_hex) num_transactions += 1 def run_test(self): # Track test coverage statistics self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2 self.crashed_on_restart = 0 # Track count of crashes during recovery # Start by creating a lot of utxos on node3 initial_height = self.nodes[3].getblockcount() utxo_list = create_confirmed_utxos(self.nodes[3], 5000) self.log.info("Prepped {} utxo entries".format(len(utxo_list))) # Sync these blocks with the other nodes block_hashes_to_sync = [] for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1): block_hashes_to_sync.append(self.nodes[3].getblockhash(height)) self.log.debug("Syncing {} blocks with other nodes".format( len(block_hashes_to_sync))) # Syncing the blocks could cause nodes to crash, so the test begins # here. self.sync_node3blocks(block_hashes_to_sync) starting_tip_height = self.nodes[3].getblockcount() # Set mock time to the last block time. This will allow us to increase # the time at each loop so the block hash will always differ for the # same block height, and avoid duplication. # Note that the current time can be behind the block time due to the # way the miner sets the block time. 
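# Editor's aside: in the main loop below, a candidate height h is
# invalidated with probability 1 / (current_height + 4 - h), i.e. the tip
# itself is reorged one time in four, tip-1 one time in five, and so on,
# so deeper reorgs become progressively rarer.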
tip = self.nodes[3].getbestblockhash() block_time = self.nodes[3].getblockheader(tip)['time'] self.nodes[3].setmocktime(block_time) # Main test loop: # each time through the loop, generate a bunch of transactions, # and then either mine a single new block on the tip, or some-sized # reorg. for i in range(40): block_time += 10 self.nodes[3].setmocktime(block_time) self.log.info( "Iteration {}, generating 2500 transactions {}".format( i, self.restart_counts)) # Generate a bunch of small-ish transactions self.generate_small_transactions(self.nodes[3], 2500, utxo_list) # Pick a random block between current tip, and starting tip current_height = self.nodes[3].getblockcount() random_height = random.randint(starting_tip_height, current_height) self.log.debug("At height {}, considering height {}".format( current_height, random_height)) if random_height > starting_tip_height: # Randomly reorg from this point with some probability (1/4 for # tip, 1/5 for tip-1, ...) if random.random() < 1.0 / (current_height + 4 - random_height): self.log.debug( "Invalidating block at height {}".format(random_height)) self.nodes[3].invalidateblock( self.nodes[3].getblockhash(random_height)) # Now generate new blocks until we pass the old tip height self.log.debug("Mining longer tip") block_hashes = [] while current_height + 1 > self.nodes[3].getblockcount(): block_hashes.extend(self.nodes[3].generatetoaddress( nblocks=min(10, current_height + 1 - self.nodes[3].getblockcount()), # new address to avoid mining a block that has just been # invalidated address=self.nodes[3].getnewaddress(), )) self.log.debug("Syncing %d new blocks...", len(block_hashes)) self.sync_node3blocks(block_hashes) utxo_list = self.nodes[3].listunspent() self.log.debug("Node3 utxo count: {}".format(len(utxo_list))) # Check that the utxo hashes agree with node3 # Useful side effect: each utxo cache gets flushed here, so that we # won't get crashes on shutdown at the end of the test. self.verify_utxo_hash() # Check the test coverage self.log.info("Restarted nodes: {}; crashes on restart: {}".format( self.restart_counts, self.crashed_on_restart)) # If no nodes were restarted, we didn't test anything. assert self.restart_counts != [0, 0, 0] # Make sure we tested the case of crash-during-recovery. assert self.crashed_on_restart > 0 # Warn if any of the nodes escaped restart. for i in range(3): if self.restart_counts[i] == 0: self.log.warning( "Node {} never crashed during utxo flush!".format(i)) if __name__ == "__main__": ChainstateWriteCrashTest().main() diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py index c98eb0095..4cd1f3d0f 100755 --- a/test/functional/feature_dersig.py +++ b/test/functional/feature_dersig.py @@ -1,112 +1,116 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test BIP66 (DER SIG). Test that the DERSIG soft-fork activates at (regtest) height 1251. 
""" -from test_framework.blocktools import create_block, create_coinbase, create_transaction +from test_framework.blocktools import ( + create_block, + create_coinbase, + create_transaction, +) from test_framework.messages import msg_block from test_framework.p2p import P2PInterface from test_framework.script import CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal DERSIG_HEIGHT = 1251 # A canonical signature consists of: # <30> <02> <02> def unDERify(tx): """ Make the signature in vin 0 of a tx non-DER-compliant, by adding padding after the S-value. """ scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): newscript.append(i[0:-1] + b'\0' + i[-1:]) else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) class BIP66Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [['-whitelist=noban@127.0.0.1']] self.setup_clean_chain = True self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): self.nodes[0].add_p2p_connection(P2PInterface()) self.log.info("Mining {} blocks".format(DERSIG_HEIGHT - 1)) self.coinbase_txids = [self.nodes[0].getblock( b)['tx'][0] for b in self.nodes[0].generate(DERSIG_HEIGHT - 1)] self.nodeaddress = self.nodes[0].getnewaddress() self.log.info("Test that blocks must now be at least version 3") tip = self.nodes[0].getbestblockhash() block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 block = create_block( int(tip, 16), create_coinbase(DERSIG_HEIGHT), block_time) block.nVersion = 2 block.rehash() block.solve() with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000002)'.format(block.hash)]): self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) self.nodes[0].p2p.sync_with_ping() self.log.info( "Test that transactions with non-DER signatures cannot appear in a block") block.nVersion = 3 spendtx = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1000000) unDERify(spendtx) spendtx.rehash() # First we show that this tx is valid except for DERSIG by getting it # rejected from the mempool for exactly that reason. assert_equal( [{'txid': spendtx.hash, 'allowed': False, 'reject-reason': 'mandatory-script-verify-flag-failed (Non-canonical DER signature)'}], self.nodes[0].testmempoolaccept( rawtxs=[spendtx.serialize().hex()], maxfeerate=0) ) # Now we verify that a block with this transaction is also invalid. 
block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() with self.nodes[0].assert_debug_log(expected_msgs=['ConnectBlock {} failed, blk-bad-inputs'.format(block.hash)]): self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) self.nodes[0].p2p.sync_with_ping() self.log.info( "Test that a version 3 block with a DERSIG-compliant transaction is accepted") block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_txids[1], self.nodeaddress, amount=1.0) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) if __name__ == '__main__': BIP66Test().main() diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py index f1d40d263..51eab1715 100755 --- a/test/functional/feature_maxuploadtarget.py +++ b/test/functional/feature_maxuploadtarget.py @@ -1,180 +1,180 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test behavior of -maxuploadtarget. * Verify that getdata requests for old blocks (>1week) are dropped if uploadtarget has been reached. * Verify that getdata requests for recent blocks are respected even if uploadtarget has been reached. * Verify that the upload counters are reset after 24 hours. """ -from collections import defaultdict import time +from collections import defaultdict -from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE from test_framework.blocktools import mine_big_block -from test_framework.messages import CInv, MSG_BLOCK, msg_getdata +from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE +from test_framework.messages import MSG_BLOCK, CInv, msg_getdata from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal class TestP2PConn(P2PInterface): def __init__(self): super().__init__() self.block_receive_map = defaultdict(int) def on_inv(self, message): pass def on_block(self, message): message.block.calc_sha256() self.block_receive_map[message.block.sha256] += 1 class MaxUploadTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 # Start a node with maxuploadtarget of 200 MB (/24h) self.extra_args = [[ "-maxuploadtarget=200", "-acceptnonstdtxn=1", # bump because mocktime might cause a disconnect otherwise "-peertimeout=9999", ]] self.supports_cli = False # Cache for utxos, as the listunspent may take a long time later in the # test self.utxo_cache = [] def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): # Before we connect anything, we first set the time on the node # to be in the past, otherwise things break because the CNode # time counters can't be reset backward after initialization old_time = int(time.time() - 2 * 60 * 60 * 24 * 7) self.nodes[0].setmocktime(old_time) # Generate some old blocks self.nodes[0].generate(130) # p2p_conns[0] will only request old blocks # p2p_conns[1] will only request new blocks # p2p_conns[2] will test resetting the counters p2p_conns = [] for _ in range(3): p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn())) # Now mine a big block mine_big_block(self.nodes[0], self.utxo_cache) # Store the hash; we'll request 
this later big_old_block = self.nodes[0].getbestblockhash() old_block_size = self.nodes[0].getblock(big_old_block, True)['size'] big_old_block = int(big_old_block, 16) # Advance to two days ago self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24) # Mine one more block, so that the prior block looks old mine_big_block(self.nodes[0], self.utxo_cache) # We'll be requesting this new block too big_new_block = self.nodes[0].getbestblockhash() big_new_block = int(big_new_block, 16) # p2p_conns[0] will test what happens if we just keep requesting # the same big old block too many times (expect: disconnect) getdata_request = msg_getdata() getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block)) max_bytes_per_day = 200 * 1024 * 1024 daily_buffer = 144 * LEGACY_MAX_BLOCK_SIZE max_bytes_available = max_bytes_per_day - daily_buffer success_count = max_bytes_available // old_block_size # 144MB will be reserved for relaying new blocks, so expect this to # succeed for ~70 tries. for i in range(success_count): p2p_conns[0].send_and_ping(getdata_request) assert_equal(p2p_conns[0].block_receive_map[big_old_block], i + 1) assert_equal(len(self.nodes[0].getpeerinfo()), 3) # At most a couple more tries should succeed (depending on how long # the test has been running so far). for _ in range(3): p2p_conns[0].send_message(getdata_request) p2p_conns[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) self.log.info( "Peer 0 disconnected after downloading old block too many times") # Requesting the current block on p2p_conns[1] should succeed indefinitely, # even when over the max upload target. # We'll try 200 times getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)] for i in range(200): p2p_conns[1].send_and_ping(getdata_request) assert_equal(p2p_conns[1].block_receive_map[big_new_block], i + 1) self.log.info("Peer 1 able to repeatedly download new block") # But if p2p_conns[1] tries for an old block, it gets disconnected # too. getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)] p2p_conns[1].send_message(getdata_request) p2p_conns[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 1) self.log.info("Peer 1 disconnected after trying to download old block") self.log.info("Advancing system time on node to clear counters...") # If we advance the time by 24 hours, then the counters should reset, # and p2p_conns[2] should be able to retrieve the old block.
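# A quick check of the budget arithmetic above, as a sketch; it assumes
# LEGACY_MAX_BLOCK_SIZE is 1,000,000 bytes and each big block is roughly
# 0.95 MB, which is what 950k of OP_NOP produces:
#
#     max_bytes_per_day = 200 * 1024 * 1024       # -maxuploadtarget=200
#     daily_buffer = 144 * 1000000                # always reserved for recent blocks
#     max_bytes_available = max_bytes_per_day - daily_buffer  # ~65.7 MB
#     print(max_bytes_available // 950000)        # ~69, hence "~70 tries" above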
self.nodes[0].setmocktime(int(time.time())) p2p_conns[2].sync_with_ping() p2p_conns[2].send_and_ping(getdata_request) assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1) self.log.info("Peer 2 able to download old block") self.nodes[0].disconnect_p2ps() self.log.info("Restarting node 0 with download permission" " and 1MB maxuploadtarget") self.restart_node(0, ["-whitelist=download@127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) # Reconnect to self.nodes[0] self.nodes[0].add_p2p_connection(TestP2PConn()) # retrieve 20 blocks which should be enough to break the 1MB limit getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)] for i in range(20): self.nodes[0].p2p.send_and_ping(getdata_request) assert_equal( self.nodes[0].p2p.block_receive_map[big_new_block], i + 1) getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)] self.nodes[0].p2p.send_and_ping(getdata_request) self.log.info( "Peer still connected after trying to download old block (download permission)") peer_info = self.nodes[0].getpeerinfo() # node is still connected assert_equal(len(peer_info), 1) assert_equal(peer_info[0]['permissions'], ['download']) if __name__ == '__main__': MaxUploadTest().main() diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index 1dfd81825..7f928feff 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -1,231 +1,227 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test bitcoind with different proxy configuration. Test plan: - Start bitcoinds with different proxy configurations - Use addnode to initiate connections - Verify that proxies are connected to, and the right connection command is given - Proxy configurations to test on bitcoind side: - `-proxy` (proxy everything) - `-onion` (proxy just onions) - `-proxyrandomize` Circuit randomization - Proxy configurations to test on proxy side: - support no authentication (other proxy) - support no authentication + user/pass authentication (Tor) - proxy on IPv6 - Create various proxies (as threads) - Create bitcoinds that connect to them - Manipulate the bitcoinds using addnode (onetry) and observe effects addnode connect to IPv4 addnode connect to IPv6 addnode connect to onion addnode connect to generic DNS name """ import os import socket from test_framework.netutil import test_ipv6_local from test_framework.socks5 import ( AddressType, Socks5Command, Socks5Configuration, Socks5Server, ) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, - PORT_MIN, - PORT_RANGE, -) +from test_framework.util import PORT_MIN, PORT_RANGE, assert_equal RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports class ProxyTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = True def setup_nodes(self): self.have_ipv6 = test_ipv6_local() # Create two proxies on different ports # ... one unauthenticated self.conf1 = Socks5Configuration() self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000)) self.conf1.unauth = True self.conf1.auth = False # ... one supporting authenticated and unauthenticated (Tor) self.conf2 = Socks5Configuration() self.conf2.addr = ( '127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000)) self.conf2.unauth = True self.conf2.auth = True if self.have_ipv6: # ...
one on IPv6 with similar configuration self.conf3 = Socks5Configuration() self.conf3.af = socket.AF_INET6 self.conf3.addr = ( '::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) self.conf3.unauth = True self.conf3.auth = True else: self.log.warning("Testing without local IPv6 support") self.serv1 = Socks5Server(self.conf1) self.serv1.start() self.serv2 = Socks5Server(self.conf2) self.serv2.start() if self.have_ipv6: self.serv3 = Socks5Server(self.conf3) self.serv3.start() # Note: proxies are not used to connect to local nodes # this is because the proxy to use is based on CService.GetNetwork(), # which returns NET_UNROUTABLE for localhost args = [ ['-listen', '-proxy={}:{}'.format( self.conf1.addr[0], self.conf1.addr[1]), '-proxyrandomize=1'], ['-listen', '-proxy={}:{}'.format( self.conf1.addr[0], self.conf1.addr[1]), '-onion={}:{}'.format( self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=0'], ['-listen', '-proxy={}:{}'.format( self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=1'], [] ] if self.have_ipv6: args[3] = ['-listen', '-proxy=[{}]:{}'.format( self.conf3.addr[0], self.conf3.addr[1]), '-proxyrandomize=0', '-noonion'] self.add_nodes(self.num_nodes, extra_args=args) self.start_nodes() def node_test(self, node, proxies, auth, test_onion=True): rv = [] # Test: outgoing IPv4 connection through node node.addnode("15.61.23.23:1234", "onetry") cmd = proxies[0].queue.get() assert isinstance(cmd, Socks5Command) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, # even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"15.61.23.23") assert_equal(cmd.port, 1234) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if self.have_ipv6: # Test: outgoing IPv6 connection through node node.addnode( "[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry") cmd = proxies[1].queue.get() assert isinstance(cmd, Socks5Command) # Note: bitcoind's SOCKS5 implementation only sends atyp # DOMAINNAME, even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534") assert_equal(cmd.port, 5443) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if test_onion: # Test: outgoing onion connection through node node.addnode("bitcoinostk4e4re.onion:8333", "onetry") cmd = proxies[2].queue.get() assert isinstance(cmd, Socks5Command) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"bitcoinostk4e4re.onion") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) # Test: outgoing DNS name connection through node node.addnode("node.noumenon:8333", "onetry") cmd = proxies[3].queue.get() assert isinstance(cmd, Socks5Command) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"node.noumenon") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) return rv def run_test(self): # basic -proxy self.node_test( self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) # -proxy plus -onion self.node_test( self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) # -proxy plus -onion, -proxyrandomize rv = self.node_test( self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) # Check that credentials as used for -proxyrandomize connections are # unique
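# Background for the check below: with -proxyrandomize=1, bitcoind sends
# a fresh username/password pair with every SOCKS5 CONNECT, which Tor
# treats as a request for stream isolation. The CONNECT itself is plain
# RFC 1928; a sketch with an illustrative destination:
#
#     req = bytes([0x05, 0x01, 0x00, 0x03])  # VER=5, CMD=CONNECT, RSV, ATYP=DOMAINNAME
#     req += bytes([len(b"node.example")]) + b"node.example"
#     req += (8333).to_bytes(2, "big")       # DST.PORT, network byte order
#
# ATYP DOMAINNAME carries a one-byte length prefix, which is why bitcoind
# can tunnel IPv4 literals, .onion names, and DNS names through one path.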
credentials = set((x.username, x.password) for x in rv) assert_equal(len(credentials), len(rv)) if self.have_ipv6: # proxy on IPv6 localhost self.node_test( self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) def networks_dict(d): r = {} for x in d['networks']: r[x['name']] = x return r # test RPC getnetworkinfo n0 = networks_dict(self.nodes[0].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n0[net]['proxy'], '{}:{}'.format( self.conf1.addr[0], self.conf1.addr[1])) assert_equal(n0[net]['proxy_randomize_credentials'], True) assert_equal(n0['onion']['reachable'], True) n1 = networks_dict(self.nodes[1].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n1[net]['proxy'], '{}:{}'.format( self.conf1.addr[0], self.conf1.addr[1])) assert_equal(n1[net]['proxy_randomize_credentials'], False) assert_equal(n1['onion']['proxy'], '{}:{}'.format( self.conf2.addr[0], self.conf2.addr[1])) assert_equal(n1['onion']['proxy_randomize_credentials'], False) assert_equal(n1['onion']['reachable'], True) n2 = networks_dict(self.nodes[2].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n2[net]['proxy'], '{}:{}'.format( self.conf2.addr[0], self.conf2.addr[1])) assert_equal(n2[net]['proxy_randomize_credentials'], True) assert_equal(n2['onion']['reachable'], True) if self.have_ipv6: n3 = networks_dict(self.nodes[3].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n3[net]['proxy'], '[{}]:{}'.format( self.conf3.addr[0], self.conf3.addr[1])) assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) if __name__ == '__main__': ProxyTest().main() diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index c6d2c0ed4..017fb5cb7 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -1,520 +1,520 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the pruning code. WARNING: This test uses 4GB of disk space. This test takes 30 mins or more (up to 2 hours) """ import os from test_framework.blocktools import create_coinbase from test_framework.messages import CBlock, ToHex -from test_framework.script import CScript, OP_RETURN, OP_NOP +from test_framework.script import OP_NOP, OP_RETURN, CScript from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, connect_nodes, disconnect_nodes, ) # Rescans start at the earliest block up to 2 hours before a key timestamp, so # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time. TIMESTAMP_WINDOW = 2 * 60 * 60 def mine_large_blocks(node, n): # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase # transaction but is consensus valid. # Set the nTime if this is the first time this function has been called. # A static variable ensures that time is monotonically increasing and is therefore # different for each block created => blockhash is unique.
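# Python has no static locals; stashing state on the function object
# itself is the idiom used here. A minimal standalone sketch of the same
# pattern:
#
#     def counter():
#         if "n" not in counter.__dict__:  # first call only
#             counter.n = 0
#         counter.n += 1
#         return counter.n
#
#     assert counter() == 1 and counter() == 2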
if "nTimes" not in mine_large_blocks.__dict__: mine_large_blocks.nTime = 0 # Get the block parameters for the first block big_script = CScript([OP_RETURN] + [OP_NOP] * 950000) best_block = node.getblock(node.getbestblockhash()) height = int(best_block["height"]) + 1 mine_large_blocks.nTime = max( mine_large_blocks.nTime, int(best_block["time"])) + 1 previousblockhash = int(best_block["hash"], 16) for _ in range(n): # Build the coinbase transaction (with large scriptPubKey) coinbase_tx = create_coinbase(height) coinbase_tx.vin[0].nSequence = 2 ** 32 - 1 coinbase_tx.vout[0].scriptPubKey = big_script coinbase_tx.rehash() # Build the block block = CBlock() block.nVersion = best_block["version"] block.hashPrevBlock = previousblockhash block.nTime = mine_large_blocks.nTime block.nBits = int('207fffff', 16) block.nNonce = 0 block.vtx = [coinbase_tx] block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Submit to the node node.submitblock(ToHex(block)) previousblockhash = block.sha256 height += 1 mine_large_blocks.nTime += 1 def calc_usage(blockdir): return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.) class PruneTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 self.supports_cli = False # Create nodes 0 and 1 to mine. # Create node 2 to test pruning. self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1"] # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create nodes 5 to test wallet in prune mode, but do not connect self.extra_args = [self.full_node_default_args, self.full_node_default_args, ["-maxreceivebuffer=20000", "-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-prune=550"]] self.rpc_timeout = 120 def skip_test_if_missing_module(self): self.skip_if_no_wallet() def setup_network(self): self.setup_nodes() self.prunedir = os.path.join( self.nodes[2].datadir, self.chain, 'blocks', '') connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[2]) connect_nodes(self.nodes[0], self.nodes[2]) connect_nodes(self.nodes[0], self.nodes[3]) connect_nodes(self.nodes[0], self.nodes[4]) self.sync_blocks(self.nodes[0:5]) def setup_nodes(self): self.add_nodes(self.num_nodes, self.extra_args) self.start_nodes() for n in self.nodes: n.importprivkey( privkey=n.get_deterministic_priv_key().key, label='coinbase', rescan=False) def create_big_chain(self): # Start by creating some coinbases we can spend later self.nodes[1].generate(200) self.sync_blocks(self.nodes[0:2]) self.nodes[0].generate(150) # Then mine enough full blocks to create more than 550MiB of data mine_large_blocks(self.nodes[0], 645) self.sync_blocks(self.nodes[0:5]) def test_height_min(self): assert os.path.isfile(os.path.join( self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early" self.log.info("Success") self.log.info("Though we're already using more than 550MiB, current usage: {}".format( calc_usage(self.prunedir))) self.log.info( "Mining 25 more blocks should cause the first block file to be pruned") # Pruning doesn't run until we're allocating another chunk, 20 full # blocks past the height cutoff will ensure this 
mine_large_blocks(self.nodes[0], 25) # Wait for blk00000.dat to be pruned self.wait_until( lambda: not os.path.isfile( os.path.join(self.prunedir, "blk00000.dat")), timeout=30) self.log.info("Success") usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) assert_greater_than(550, usage) def create_chain_with_staleblocks(self): # Create stale blocks in manageable sized chunks self.log.info( "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") for _ in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and # then reorgs when node0 reconnects disconnect_nodes(self.nodes[0], self.nodes[1]) disconnect_nodes(self.nodes[0], self.nodes[2]) # Mine 24 blocks in node 1 mine_large_blocks(self.nodes[1], 24) # Reorg back with 25 block chain from node 0 mine_large_blocks(self.nodes[0], 25) # Create connections in this order so both nodes can see the reorg # at the same time connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[0], self.nodes[2]) self.sync_blocks(self.nodes[0:3]) self.log.info("Usage can be over target because of high stale rate: {}".format( calc_usage(self.prunedir))) def reorg_test(self): # Node 1 will mine a 300 block chain starting 287 blocks back from Node # 0 and Node 2's tip. This will cause Node 2 to do a reorg requiring # 288 blocks of undo data to the reorg_test chain. height = self.nodes[1].getblockcount() self.log.info("Current block height: {}".format(height)) self.forkheight = height - 287 self.forkhash = self.nodes[1].getblockhash(self.forkheight) self.log.info("Invalidating block {} at height {}".format( self.forkhash, self.forkheight)) self.nodes[1].invalidateblock(self.forkhash) # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want.
# So invalidate that fork as well, until we're on the same chain as # node 0/2 (but at an ancestor 288 blocks ago) mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1) curhash = self.nodes[1].getblockhash(self.forkheight - 1) while curhash != mainchainhash: self.nodes[1].invalidateblock(curhash) curhash = self.nodes[1].getblockhash(self.forkheight - 1) assert self.nodes[1].getblockcount() == self.forkheight - 1 self.log.info("New best height: {}".format( self.nodes[1].getblockcount())) # Disconnect node1 and generate the new chain disconnect_nodes(self.nodes[0], self.nodes[1]) disconnect_nodes(self.nodes[1], self.nodes[2]) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) self.log.info("Reconnect nodes") connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[2]) self.sync_blocks(self.nodes[0:3], timeout=120) self.log.info("Verify height on node 2: {}".format( self.nodes[2].getblockcount())) self.log.info("Usage possibly still high because of stale blocks in block files: {}".format( calc_usage(self.prunedir))) self.log.info( "Mine 220 more large blocks so we have requisite history") mine_large_blocks(self.nodes[0], 220) self.sync_blocks(self.nodes[0:3], timeout=120) usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) assert_greater_than(550, usage) def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_rpc_error( -1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']): self.nodes[2].verifychain(checklevel=4, nblocks=0) self.log.info( "Will need to redownload block {}".format(self.forkheight)) # Verify that we have enough history to reorg back to the fork point. # Although this is more than 288 blocks, because this chain was written # more recently and only its other 299 small and 220 large blocks are in # the block files after it, it is expected to still be retained. self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) first_reorg_height = self.nodes[2].getblockcount() curchainhash = self.nodes[2].getblockhash(self.mainchainheight) self.nodes[2].invalidateblock(curchainhash) goalbestheight = self.mainchainheight goalbesthash = self.mainchainhash2 # As of 0.10 the current block download logic is not able to reorg to # the original chain created in create_chain_with_staleblocks because # it doesn't know of any peer that's on that chain from which to # redownload its missing blocks. Invalidate the reorg_test chain in # node 0 as well; it can successfully switch to the original chain # because it has all the block data. However, it must mine enough blocks # to have a more work chain than the reorg_test chain in order to # trigger node 2's block download logic. At this point node 2 is within # 288 blocks of the fork point so it will preserve its ability to # reorg. if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight self.log.info( "Rewind node 0 to prev main chain to mine longer chain to trigger redownload.
Blocks needed: {}".format( blocks_to_mine)) self.nodes[0].invalidateblock(curchainhash) assert_equal(self.nodes[0].getblockcount(), self.mainchainheight) assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2) goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 self.log.info( "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") # Wait for Node 2 to reorg to proper height self.wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900) assert_equal(self.nodes[2].getbestblockhash(), goalbesthash) # Verify we can now have the data for a block previously pruned assert_equal(self.nodes[2].getblock( self.forkhash)["height"], self.forkheight) def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode self.start_node(node_number) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) # now re-start in manual pruning mode self.restart_node(node_number, extra_args=["-prune=1"]) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) def height(index): if use_timestamp: return node.getblockheader(node.getblockhash(index))[ "time"] + TIMESTAMP_WINDOW else: return index def prune(index): ret = node.pruneblockchain(height=height(index)) assert_equal(ret, node.getblockchaininfo()['pruneheight']) def has_block(index): return os.path.isfile(os.path.join( self.nodes[node_number].datadir, self.chain, "blocks", "blk{:05}.dat".format(index))) # should not prune because chain tip of node 3 (995) < PruneAfterHeight # (1000) assert_raises_rpc_error( -1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) # Save block transaction count before pruning, assert value block1_details = node.getblock(node.getblockhash(1)) assert_equal(block1_details["nTx"], len(block1_details["tx"])) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) node.generate(6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # Pruned block should still know the number of transactions assert_equal(node.getblockheader(node.getblockhash(1)) ["nTx"], block1_details["nTx"]) # negative heights should raise an exception assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) # height=100 too low to prune first block file so this is a no-op prune(100) assert has_block( 0), "blk00000.dat is missing when should still be there" # Does nothing node.pruneblockchain(height(0)) assert has_block( 0), "blk00000.dat is missing when should still be there" # height=500 should prune first file prune(500) assert not has_block( 0), "blk00000.dat is still there, should be pruned by now" assert has_block( 1), "blk00001.dat is missing when should still be there" # height=650 should prune second file prune(650) assert not has_block( 1), "blk00001.dat is still there, should be pruned by now" # height=1000 should not prune anything more, because tip-288 is in # blk00002.dat. 
prune(1000) assert has_block( 2), "blk00002.dat is missing when it should still be there" # advance the tip so blk00002.dat and blk00003.dat can be pruned (the # last 288 blocks should now be in blk00004.dat) node.generate(288) prune(1000) assert not has_block( 2), "blk00002.dat is still there, should be pruned by now" assert not has_block( 3), "blk00003.dat is still there, should be pruned by now" # stop node, start back up with auto-prune at 550 MiB, make sure still # runs self.restart_node(node_number, extra_args=["-prune=550"]) self.log.info("Success") def wallet_test(self): # check that the pruning node's wallet is still in good shape self.log.info("Stop and start pruning node to trigger wallet rescan") self.restart_node( 2, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. self.log.info("Syncing node 5 to test wallet") connect_nodes(self.nodes[0], self.nodes[5]) nds = [self.nodes[0], self.nodes[5]] self.sync_blocks(nds, wait=5, timeout=300) self.restart_node( 5, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") def run_test(self): self.log.info("Warning! This test requires 4GB of disk space") self.log.info("Mining a big blockchain of 995 blocks") self.create_big_chain() # Chain diagram key: # * blocks on main chain # +,&,$,@ blocks on other forks # X invalidated block # N1 Node 1 # # Start by mining a simple chain that all nodes have # N0=N1=N2 **...*(995) # stop manual-pruning nodes with 995 blocks self.stop_node(3) self.stop_node(4) self.log.info( "Check that we haven't started pruning yet because we're below PruneAfterHeight") self.test_height_min() # Extend this chain past the PruneAfterHeight # N0=N1=N2 **...*(1020) self.log.info( "Check that we'll exceed disk space target if we have a very high stale block rate") self.create_chain_with_staleblocks() # Disconnect N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0 # N1=N2 **...*+...+(1044) # N0 **...**...**(1045) # # reconnect nodes causing reorg on N1 and N2 # N1=N2 **...*(1020) *...**(1045) # \ # +...+(1044) # # repeat this process until you have 12 stale forks hanging off the # main chain on N1 and N2 # N0 *************************...***************************(1320) # # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) # \ \ \ # +...+(1044) &.. $...$(1319) # Save some current chain state for later use self.mainchainheight = self.nodes[2].getblockcount() # 1320 self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) self.log.info("Check that we can survive a 288 block reorg still") self.reorg_test() # (1033, ) # Now create a 288 block reorg by mining a longer chain on N1 # First disconnect N1 # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain # N1 **...*(1020) **...**(1032)X.. # \ # ++...+(1031)X.. # # Now mine 300 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@(1332) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # Reconnect nodes and mine 220 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # N2 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ *...**(1320) # \ \ # ++...++(1044) ..
# # N0 ********************(1032) @@...@@@(1552) # \ # *...**(1320) self.log.info( "Test that we can rerequest a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # original main chain (*), but will require redownload of some blocks # In order to have a peer we think we can download from, we must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: # N0 ********************(1032) **...****(1553) # \ # X@...@@@(1552) # # N2 **...*(1020) **...**(1032) **...****(1553) # \ \ # \ X@...@@@(1552) # \ # +.. # # N1 doesn't change because 1033 on main chain (*) is invalid self.log.info("Test manual pruning with block indices") self.manual_test(3, use_timestamp=False) self.log.info("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) self.log.info("Test wallet re-scan") self.wallet_test() self.log.info("Done") if __name__ == '__main__': PruneTest().main() diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py index 473a16af8..db1528975 100755 --- a/test/functional/feature_settings.py +++ b/test/functional/feature_settings.py @@ -1,94 +1,93 @@ #!/usr/bin/env python3 # Copyright (c) 2017-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test various command line arguments and configuration file parameters.""" import json - from pathlib import Path from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ErrorMatch from test_framework.util import assert_equal class SettingsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): node, = self.nodes settings = Path(node.datadir, self.chain, "settings.json") conf = Path(node.datadir, "bitcoin.conf") # Assert empty settings file was created self.stop_node(0) with settings.open() as fp: assert_equal(json.load(fp), {}) # Assert settings are parsed and logged with settings.open("w") as fp: json.dump({"string": "string", "num": 5, "bool": True, "null": None, "list": [6, 7]}, fp) with node.assert_debug_log(expected_msgs=[ 'Setting file arg: string = "string"', 'Setting file arg: num = 5', 'Setting file arg: bool = true', 'Setting file arg: null = null', 'Setting file arg: list = [6,7]']): self.start_node(0) self.stop_node(0) # Assert settings are unchanged after shutdown with settings.open() as fp: assert_equal( json.load(fp), { "string": "string", "num": 5, "bool": True, "null": None, "list": [ 6, 7]}) # Test invalid json with settings.open("w") as fp: fp.write("invalid json") node.assert_start_raises_init_error( expected_msg='Unable to parse settings file', match=ErrorMatch.PARTIAL_REGEX) # Test invalid json object with settings.open("w") as fp: fp.write('"string"') node.assert_start_raises_init_error( expected_msg='Found non-object value "string" in settings file', match=ErrorMatch.PARTIAL_REGEX) # Test invalid settings file containing duplicate keys with settings.open("w") as fp: fp.write('{"key": 1, "key": 2}') node.assert_start_raises_init_error( expected_msg='Found duplicate key key in settings file', match=ErrorMatch.PARTIAL_REGEX) # Test invalid settings file is ignored with command line -nosettings with
node.assert_debug_log(expected_msgs=['Command-line arg: settings=false']): self.start_node(0, extra_args=["-nosettings"]) self.stop_node(0) # Test invalid settings file is ignored with config file -nosettings with conf.open('a') as f: f.write('nosettings=1\n') with node.assert_debug_log(expected_msgs=['Config file arg: [regtest] settings=false']): self.start_node(0) self.stop_node(0) # Test alternate settings path altsettings = Path(node.datadir, "altsettings.json") with altsettings.open("w") as fp: fp.write('{"key": "value"}') with node.assert_debug_log(expected_msgs=['Setting file arg: key = "value"']): self.start_node(0, extra_args=["-settings={}".format(altsettings)]) self.stop_node(0) if __name__ == '__main__': SettingsTest().main() diff --git a/test/functional/feature_shutdown.py b/test/functional/feature_shutdown.py index e156a5992..49e1b74a3 100755 --- a/test/functional/feature_shutdown.py +++ b/test/functional/feature_shutdown.py @@ -1,40 +1,41 @@ #!/usr/bin/env python3 # Copyright (c) 2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test bitcoind shutdown.""" +from threading import Thread + from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, get_rpc_proxy -from threading import Thread def test_long_call(node): block = node.waitfornewblock() assert_equal(block['height'], 0) class ShutdownTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 self.supports_cli = False def run_test(self): node = get_rpc_proxy( self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir) # Force connection establishment by executing a dummy command. node.getblockcount() Thread(target=test_long_call, args=(node,)).start() # Wait until the server is executing the above `waitfornewblock` (the # expected count is 2 because this `getrpcinfo` poll counts itself). self.wait_until( lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2) # Wait 1 second after requesting shutdown but not before the `stop` call # finishes. This is to ensure event loop waits for current connections # to close. self.stop_node(0, wait=1000) if __name__ == '__main__': ShutdownTest().main()
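A closing note on the settings layering that feature_settings.py exercises above: values are resolved command line > settings.json > bitcoin.conf, which is why -nosettings from either the command line or the config file can disable the settings file entirely. A toy resolver sketching that order (illustrative names, not the real parser):

    def resolve(key, cli, settings, conf):
        for layer in (cli, settings, conf):  # highest precedence first
            if key in layer:
                return layer[key]
        return None

    assert resolve("prune", {"prune": 0}, {"prune": 550}, {}) == 0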