diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py index eb1bb8358..210d2b6dc 100755 --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -1,217 +1,221 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test logic for skipping signature validation on old blocks. Test logic for skipping signature validation on blocks which we've assumed valid (https://github.com/bitcoin/bitcoin/pull/9484) We build a chain that includes an invalid signature for one of the transactions: 0: genesis block 1: block 1 with coinbase transaction output. 2-101: bury that block with 100 blocks so the coinbase transaction output can be spent 102: a block containing a transaction spending the coinbase transaction output. The transaction has an invalid signature. 103-2202: bury the bad block with just over two weeks' worth of blocks (2100 blocks) Start three nodes: - node0 has no -assumevalid parameter. Try to sync to block 2202. It will reject block 102 and only sync as far as block 101 - node1 has -assumevalid set to the hash of block 102. Try to sync to block 2202. node1 will sync all the way to block 2202. - node2 has -assumevalid set to the hash of block 102. Try to sync to block 200. node2 will reject block 102 even though it's assumed valid, because it isn't buried by at least two weeks' work. """ import time from test_framework.blocktools import (create_block, create_coinbase) from test_framework.key import CECKey -from test_framework.mininode import (CBlockHeader, - COutPoint, - CTransaction, - CTxIn, - CTxOut, - network_thread_join, - network_thread_start, - P2PInterface, - msg_block, - msg_headers) +from test_framework.messages import ( + CBlockHeader, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + msg_block, + msg_headers, +) +from test_framework.mininode import ( + network_thread_join, + network_thread_start, + P2PInterface, +) from test_framework.script import (CScript, OP_TRUE) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal from test_framework.txtools import pad_tx +from test_framework.util import assert_equal class BaseNode(P2PInterface): def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) class AssumeValidTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 def setup_network(self): self.add_nodes(3) # Start node0. We don't start the other nodes yet since # we need to pre-mine a block with an invalid transaction # signature so we can pass in the block hash as assumevalid.
self.start_node(0) def send_blocks_until_disconnected(self, p2p_conn): """Keep sending blocks to the node until we're disconnected.""" for i in range(len(self.blocks)): if p2p_conn.state != "connected": break try: p2p_conn.send_message(msg_block(self.blocks[i])) except IOError as e: assert str(e) == 'Not connected, no pushbuf' break def assert_blockchain_height(self, node, height): """Wait until the blockchain is no longer advancing and verify it's reached the expected height.""" last_height = node.getblock(node.getbestblockhash())['height'] timeout = 10 while True: time.sleep(0.25) current_height = node.getblock(node.getbestblockhash())['height'] if current_height != last_height: last_height = current_height if timeout < 0: assert False, "blockchain too short after timeout: {}".format( current_height) timeout -= 0.25 continue elif current_height > height: assert False, "blockchain too long: {}".format(current_height) elif current_height == height: break def run_test(self): # Connect to node0 p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) network_thread_start() self.nodes[0].p2p.wait_for_verack() # Build the blockchain self.tip = int(self.nodes[0].getbestblockhash(), 16) self.block_time = self.nodes[0].getblock( self.nodes[0].getbestblockhash())['time'] + 1 self.blocks = [] # Get a pubkey for the coinbase TXO coinbase_key = CECKey() coinbase_key.set_secretbytes(b"horsebattery") coinbase_pubkey = coinbase_key.get_pubkey() # Create the first block with a coinbase output to our key height = 1 block = create_block(self.tip, create_coinbase( height, coinbase_pubkey), self.block_time) self.blocks.append(block) self.block_time += 1 block.solve() # Save the coinbase for later self.block1 = block self.tip = block.sha256 height += 1 # Bury the block 100 deep so the coinbase output is spendable for i in range(100): block = create_block( self.tip, create_coinbase(height), self.block_time) block.solve() self.blocks.append(block) self.tip = block.sha256 self.block_time += 1 height += 1 # Create a transaction spending the coinbase output with an invalid (null) signature tx = CTransaction() tx.vin.append( CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b"")) tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE]))) pad_tx(tx) tx.calc_sha256() block102 = create_block( self.tip, create_coinbase(height), self.block_time) self.block_time += 1 block102.vtx.extend([tx]) block102.hashMerkleRoot = block102.calc_merkle_root() block102.rehash() block102.solve() self.blocks.append(block102) self.tip = block102.sha256 self.block_time += 1 height += 1 # Bury the assumed valid block 2100 deep for i in range(2100): block = create_block( self.tip, create_coinbase(height), self.block_time) block.nVersion = 4 block.solve() self.blocks.append(block) self.tip = block.sha256 self.block_time += 1 height += 1 # We're adding new connections so terminate the network thread self.nodes[0].disconnect_p2ps() network_thread_join() # Start node1 and node2 with assumevalid so they accept a block with a bad signature.
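# Aside (illustration only, not part of the test): the framework tracks block
# hashes as integers, while the -assumevalid flag wants a hex string. A
# hypothetical helper showing how the extra_args used just below are formed:
def assumevalid_arg(block):
    # block.sha256 is an int; hex() of it is what start_node() is given.
    return "-assumevalid=" + hex(block.sha256)
# Note the node only skips signature checks for ancestors of this hash, and
# only once the hash is buried under sufficient work (two weeks' worth here).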
self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)]) self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)]) p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) p2p1 = self.nodes[1].add_p2p_connection(BaseNode()) p2p2 = self.nodes[2].add_p2p_connection(BaseNode()) network_thread_start() p2p0.wait_for_verack() p2p1.wait_for_verack() p2p2.wait_for_verack() # send header lists to all three nodes p2p0.send_header_for_blocks(self.blocks[0:2000]) p2p0.send_header_for_blocks(self.blocks[2000:]) p2p1.send_header_for_blocks(self.blocks[0:2000]) p2p1.send_header_for_blocks(self.blocks[2000:]) p2p2.send_header_for_blocks(self.blocks[0:200]) # Send blocks to node0. Block 102 will be rejected. self.send_blocks_until_disconnected(p2p0) self.assert_blockchain_height(self.nodes[0], 101) # Send all blocks to node1. All blocks will be accepted. for i in range(2202): p2p1.send_message(msg_block(self.blocks[i])) # Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync. p2p1.sync_with_ping(120) assert_equal(self.nodes[1].getblock( self.nodes[1].getbestblockhash())['height'], 2202) # Send blocks to node2. Block 102 will be rejected. self.send_blocks_until_disconnected(p2p2) self.assert_blockchain_height(self.nodes[2], 101) if __name__ == '__main__': AssumeValidTest().main() diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py index c695ca0e7..9d3763564 100755 --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -1,471 +1,492 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
# # Test BIP68 implementation # +import time + +from test_framework.blocktools import ( + create_block, + create_coinbase, +) +from test_framework.messages import ( + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + FromHex, + ToHex, +) +from test_framework.script import CScript from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.script import * -from test_framework.mininode import * -from test_framework.blocktools import * from test_framework.txtools import pad_tx +from test_framework.util import ( + assert_equal, + assert_greater_than, + assert_raises_rpc_error, + connect_nodes, + disconnect_nodes, + satoshi_round, + sync_blocks, +) SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31) SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22) # this means use time (0 means height) SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift SEQUENCE_LOCKTIME_MASK = 0x0000ffff # RPC error for non-BIP68 final transactions NOT_FINAL_ERROR = "64: non-BIP68-final" class BIP68Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.extra_args = [["-blockprioritypercentage=0", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-blockprioritypercentage=0", "-acceptnonstdtxn=0", "-maxreorgdepth=-1"]] def run_test(self): self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] # Generate some coins self.nodes[0].generate(110) self.log.info("Running test disable flag") self.test_disable_flag() self.log.info("Running test sequence-lock-confirmed-inputs") self.test_sequence_lock_confirmed_inputs() self.log.info("Running test sequence-lock-unconfirmed-inputs") self.test_sequence_lock_unconfirmed_inputs() self.log.info( "Running test BIP68 not consensus before versionbits activation") self.test_bip68_not_consensus() self.log.info("Verifying nVersion=2 transactions aren't standard") self.test_version2_relay(before_activation=True) self.log.info("Activating BIP68 (and 112/113)") self.activateCSV() self.log.info("Verifying nVersion=2 transactions are now standard") self.test_version2_relay(before_activation=False) self.log.info("Passed") # Test that BIP68 is not in effect if tx version is 1, or if # the first sequence bit is set. def test_disable_flag(self): # Create some unconfirmed inputs new_addr = self.nodes[0].getnewaddress() self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC utxos = self.nodes[0].listunspent(0, 0) assert(len(utxos) > 0) utxo = utxos[0] tx1 = CTransaction() value = int(satoshi_round(utxo["amount"] - self.relayfee) * COIN) # Check that the disable flag disables relative locktime. # If sequence locks were used, this would require 1 block for the # input to mature. sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 tx1.vin = [ CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] tx1.vout = [CTxOut(value, CScript([b'a']))] pad_tx(tx1) tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) # This transaction will enable sequence-locks, so this transaction should # fail tx2 = CTransaction() tx2.nVersion = 2 sequence_value = sequence_value & 0x7fffffff tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] tx2.vout = [CTxOut(int(value - self.relayfee * COIN), CScript([b'a']))] pad_tx(tx2) tx2.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. 
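# Aside (hedged sketch, not part of the test): how a BIP68 nSequence value is
# assembled from the constants defined above. encode_bip68 is a hypothetical
# name, shown only to make the bit layout explicit.
def encode_bip68(value, use_time):
    # The low 16 bits hold the lock value: a block count for height locks, or
    # elapsed time in 512-second units (1 << SEQUENCE_LOCKTIME_GRANULARITY)
    # for time locks, which additionally set SEQUENCE_LOCKTIME_TYPE_FLAG.
    assert 0 <= value <= SEQUENCE_LOCKTIME_MASK
    sequence_value = value
    if use_time:
        sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
    return sequence_value
# Setting SEQUENCE_LOCKTIME_DISABLE_FLAG (bit 31), as tx1 above does, turns
# the lock off entirely; a tx nVersion below 2 has the same effect.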
tx2.nVersion = 1 self.nodes[0].sendrawtransaction(ToHex(tx2)) # Calculate the median time past of a prior block ("confirmations" before # the current tip). def get_median_time_past(self, confirmations): block_hash = self.nodes[0].getblockhash( self.nodes[0].getblockcount() - confirmations) return self.nodes[0].getblockheader(block_hash)["mediantime"] # Test that sequence locks are respected for transactions spending # confirmed inputs. def test_sequence_lock_confirmed_inputs(self): # Create lots of confirmed utxos, and use them to generate lots of random # transactions. # Import at method scope (not inside the loop below) so the name is # bound even if the UTXO top-up loop never executes. import random max_outputs = 50 addresses = [] while len(addresses) < max_outputs: addresses.append(self.nodes[0].getnewaddress()) while len(self.nodes[0].listunspent()) < 200: random.shuffle(addresses) num_outputs = random.randint(1, max_outputs) outputs = {} for i in range(num_outputs): outputs[addresses[i]] = random.randint(1, 20) * 0.01 self.nodes[0].sendmany("", outputs) self.nodes[0].generate(1) utxos = self.nodes[0].listunspent() # Try creating a lot of random transactions. # Each time, choose a random number of inputs, and randomly set # some of those inputs to be sequence locked (and randomly choose # between height/time locking). Small random chance of making the locks # all pass. for i in range(400): # Randomly choose up to 10 inputs num_inputs = random.randint(1, 10) random.shuffle(utxos) # Track whether any sequence locks used should fail should_pass = True # Track whether this transaction was built with sequence locks using_sequence_locks = False tx = CTransaction() tx.nVersion = 2 value = 0 for j in range(num_inputs): sequence_value = 0xfffffffe # this disables sequence locks # 50% chance we enable sequence locks if random.randint(0, 1): using_sequence_locks = True # 10% of the time, make the input sequence value pass input_will_pass = (random.randint(1, 10) == 1) sequence_value = utxos[j]["confirmations"] if not input_will_pass: sequence_value += 1 should_pass = False # Figure out what the median-time-past was for the confirmed input # Note that if an input has N confirmations, we're going back N blocks # from the tip so that we're looking up MTP of the block # PRIOR to the one the input appears in, as per the BIP68 # spec.
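# Aside (hedged): the rule the next few lines implement, in one place. For an
# input confirmed in block B, a BIP68 time lock is measured from the MTP of
# B's predecessor up to the MTP of the current tip. Hypothetical helper:
def time_lock_satisfied(sequence_value, orig_mtp, tip_mtp):
    # Strip the type flag, keep the 16-bit lock value, rescale to seconds.
    required_delta = (sequence_value & SEQUENCE_LOCKTIME_MASK) << SEQUENCE_LOCKTIME_GRANULARITY
    return tip_mtp - orig_mtp >= required_delta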
orig_time = self.get_median_time_past( utxos[j]["confirmations"]) cur_time = self.get_median_time_past(0) # MTP of the tip # can only timelock this input if it's not too old -- # otherwise use height can_time_lock = True if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: can_time_lock = False # if time-lockable, then 50% chance we make this a time # lock if random.randint(0, 1) and can_time_lock: # Find first time-lock value that fails, or latest one # that succeeds time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY if input_will_pass and time_delta > cur_time - orig_time: sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) elif (not input_will_pass and time_delta <= cur_time - orig_time): sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + 1 sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx.vin.append( CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) value += utxos[j]["amount"] * COIN # Overestimate the size of the tx - signatures should be less than # 120 bytes, and leave 50 for the output tx_size = len(ToHex(tx)) // 2 + 120 * num_inputs + 50 tx.vout.append( CTxOut(int(value - self.relayfee * tx_size * COIN / 1000), CScript([b'a']))) rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"] if (using_sequence_locks and not should_pass): # This transaction should be rejected assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx) else: # This raw transaction should be accepted self.nodes[0].sendrawtransaction(rawtx) utxos = self.nodes[0].listunspent() # Test that sequence locks on unconfirmed inputs must have nSequence # height or time of 0 to be accepted. # Then test that BIP68-invalid transactions are removed from the mempool # after a reorg. def test_sequence_lock_unconfirmed_inputs(self): # Store height so we can easily reset the chain at the end of the test cur_height = self.nodes[0].getblockcount() # Create a mempool tx. txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # As the fees are calculated prior to the transaction being signed, # there is some uncertainty that calculate fee provides the correct # minimal fee. Since regtest coins are free, let's go ahead and # increase the fee by an order of magnitude to ensure this test # passes. fee_multiplier = 10 # Anyone-can-spend mempool tx. # Sequence lock of 0 should pass. tx2 = CTransaction() tx2.nVersion = 2 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(0), CScript([b'a']))] tx2.vout[0].nValue = tx1.vout[0].nValue - \ fee_multiplier * self.nodes[0].calculate_fee(tx2) tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(tx2_raw) # Create a spend of the 0th output of orig_tx with a sequence lock # of 1, and test what happens when submitting. 
# orig_tx.vout[0] must be an anyone-can-spend output def test_nonzero_locks(orig_tx, node, use_height_lock): sequence_value = 1 if not use_height_lock: sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx = CTransaction() tx.nVersion = 2 tx.vin = [ CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] tx.vout = [ CTxOut(int(orig_tx.vout[0].nValue - fee_multiplier * node.calculate_fee(tx)), CScript([b'a']))] pad_tx(tx) tx.rehash() if (orig_tx.hash in node.getrawmempool()): # sendrawtransaction should fail if the tx is in the mempool assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) else: # sendrawtransaction should succeed if the tx is not in the mempool node.sendrawtransaction(ToHex(tx)) return tx test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Now mine some blocks, but make sure tx2 doesn't get mined. # Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction( tx2.hash, -1e15, -fee_multiplier * self.nodes[0].calculate_fee(tx2)) cur_time = int(time.time()) for i in range(10): self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) cur_time += 600 assert(tx2.hash in self.nodes[0].getrawmempool()) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) # Mine tx2, and then try again self.nodes[0].prioritisetransaction( tx2.hash, 1e15, fee_multiplier * self.nodes[0].calculate_fee(tx2)) # Advance the time on the node so that we can test timelocks self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) assert(tx2.hash not in self.nodes[0].getrawmempool()) # Now that tx2 is not in the mempool, a sequence locked spend should # succeed tx3 = test_nonzero_locks( tx2, self.nodes[0], use_height_lock=False) assert(tx3.hash in self.nodes[0].getrawmempool()) self.nodes[0].generate(1) assert(tx3.hash not in self.nodes[0].getrawmempool()) # One more test, this time using height locks tx4 = test_nonzero_locks( tx3, self.nodes[0], use_height_lock=True) assert(tx4.hash in self.nodes[0].getrawmempool()) # Now try combining confirmed and unconfirmed inputs tx5 = test_nonzero_locks( tx4, self.nodes[0], use_height_lock=True) assert(tx5.hash not in self.nodes[0].getrawmempool()) utxos = self.nodes[0].listunspent() tx5.vin.append( CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) tx5.vout[0].nValue += int(utxos[0]["amount"] * COIN) raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"] assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) # Test mempool-BIP68 consistency after reorg # # State of the transactions in the last blocks: # ... -> [ tx2 ] -> [ tx3 ] # tip-1 tip # And currently tx4 is in the mempool. # # If we invalidate the tip, tx3 should get added to the mempool, causing # tx4 to be removed (fails sequence-lock). self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) assert(tx4.hash not in self.nodes[0].getrawmempool()) assert(tx3.hash in self.nodes[0].getrawmempool()) # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in # diagram above). # This would cause tx2 to be added back to the mempool, which in turn causes # tx3 to be removed. 
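# Aside (hedged, illustration only): every reorg step in this test checks the
# mempool purely by txid membership; a hypothetical helper making that
# pattern explicit:
def assert_mempool_state(node, expected_present, expected_absent):
    pool = set(node.getrawmempool())  # txids as hex strings
    for tx in expected_present:
        assert tx.hash in pool
    for tx in expected_absent:
        assert tx.hash not in pool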
tip = int(self.nodes[0].getblockhash( self.nodes[0].getblockcount() - 1), 16) height = self.nodes[0].getblockcount() for i in range(2): block = create_block(tip, create_coinbase(height), cur_time) block.nVersion = 3 block.rehash() block.solve() tip = block.sha256 height += 1 self.nodes[0].submitblock(ToHex(block)) cur_time += 1 mempool = self.nodes[0].getrawmempool() assert(tx3.hash not in mempool) assert(tx2.hash in mempool) # Reset the chain and get rid of the mocktimed-blocks self.nodes[0].setmocktime(0) self.nodes[0].invalidateblock( self.nodes[0].getblockhash(cur_height + 1)) self.nodes[0].generate(10) def get_csv_status(self): softforks = self.nodes[0].getblockchaininfo()['softforks'] for sf in softforks: if sf['id'] == 'csv' and sf['version'] == 5: return sf['reject']['status'] raise AssertionError('Cannot find CSV fork activation information') # Make sure that BIP68 isn't being used to validate blocks, prior to # versionbits activation. If more blocks are mined prior to this test # being run, then it's possible the test has activated the soft fork, and # this test should be moved to run earlier, or deleted. def test_bip68_not_consensus(self): assert_equal(self.get_csv_status(), False) txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Make an anyone-can-spend transaction tx2 = CTransaction() tx2.nVersion = 1 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] # sign tx2 tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"] tx2 = FromHex(tx2, tx2_raw) pad_tx(tx2) tx2.rehash() self.nodes[0].sendrawtransaction(ToHex(tx2)) # Now make an invalid spend of tx2 according to BIP68 sequence_value = 100 # 100 block relative locktime tx3 = CTransaction() tx3.nVersion = 2 tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] tx3.vout = [ CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] pad_tx(tx3) tx3.rehash() assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) # make a block that violates bip68; ensure that the tip updates tip = int(self.nodes[0].getbestblockhash(), 16) block = create_block( tip, create_coinbase(self.nodes[0].getblockcount() + 1)) block.nVersion = 3 block.vtx.extend( sorted([tx1, tx2, tx3], key=lambda tx: tx.get_id())) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].submitblock(ToHex(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) def activateCSV(self): # activation should happen at block height 576 csv_activation_height = 576 height = self.nodes[0].getblockcount() assert_greater_than(csv_activation_height - height, 1) self.nodes[0].generate(csv_activation_height - height - 1) assert_equal(self.get_csv_status(), False) disconnect_nodes(self.nodes[0], self.nodes[1]) self.nodes[0].generate(1) assert_equal(self.get_csv_status(), True) # We have a block that has CSV activated, but we want to be at # the activation point, so we invalidate the tip.
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) connect_nodes(self.nodes[0], self.nodes[1]) sync_blocks(self.nodes) # Use self.nodes[1] to test standardness relay policy def test_version2_relay(self, before_activation): inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.0} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] tx = FromHex(CTransaction(), rawtxfund) tx.nVersion = 2 tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"] # Assert in an else clause so a failed assertion is not itself swallowed # by the exception handler (a bare except would also catch AssertionError). try: self.nodes[1].sendrawtransaction(tx_signed) except Exception: assert(before_activation) else: assert(not before_activation) if __name__ == '__main__': BIP68Test().main() diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index ef730335b..7325d8806 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -1,1313 +1,1355 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.comptool import TestManager, TestInstance, RejectResult -from test_framework.blocktools import * +import copy +import struct import time + +from test_framework.blocktools import ( + create_block, + create_coinbase, + create_transaction, + get_legacy_sigopcount_block, + make_conform_to_ctor, +) +from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS_PER_MB +from test_framework.comptool import RejectResult, TestInstance, TestManager from test_framework.key import CECKey -from test_framework.script import * +from test_framework.messages import ( + CBlock, + CBlockHeader, + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + ser_uint256, + uint256_from_compact, + uint256_from_str, +) from test_framework.mininode import network_thread_start -import struct -from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS_PER_MB +from test_framework.script import ( + CScript, + hash160, + MAX_SCRIPT_ELEMENT_SIZE, + OP_2DUP, + OP_CHECKMULTISIG, + OP_CHECKMULTISIGVERIFY, + OP_CHECKSIG, + OP_CHECKSIGVERIFY, + OP_ELSE, + OP_ENDIF, + OP_EQUAL, + OP_FALSE, + OP_HASH160, + OP_IF, + OP_INVALIDOPCODE, + OP_RETURN, + OP_TRUE, + SIGHASH_ALL, + SIGHASH_FORKID, + SignatureHashForkId, +) +from test_framework.test_framework import ComparisonTestFramework +from test_framework.txtools import pad_tx +from test_framework.util import assert_equal class PreviousSpendableOutput(): def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending ''' This reimplements tests from the bitcoinj/FullBlockTestGenerator used by the pull-tester. We use the testing framework in which we expect a particular answer from each test. ''' # Use this class for tests that require behavior other than normal "mininode" behavior. # For now, it is used to serialize a bloated varint (b64).
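# Aside (hedged sketch): the "bloated varint" the class below produces. A
# CompactSize prefix of 0xFF announces an 8-byte payload, which parses fine
# but is non-canonical for small counts and grows the block by 8 bytes.
import struct  # already imported at the top of this file

def ser_compact_size_canonical(n):
    # Smallest encoding that fits n (what CBlock.serialize normally emits).
    if n < 253:
        return struct.pack("B", n)
    elif n <= 0xFFFF:
        return struct.pack("<BH", 253, n)
    elif n <= 0xFFFFFFFF:
        return struct.pack("<BI", 254, n)
    return struct.pack("<BQ", 255, n)

def ser_compact_size_bloated(n):
    # What CBrokenBlock.serialize emits for the tx count instead.
    return struct.pack("<BQ", 255, n)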
class CBrokenBlock(CBlock): def __init__(self, header=None): super(CBrokenBlock, self).__init__(header) def initialize(self, base_block): self.vtx = copy.deepcopy(base_block.vtx) self.hashMerkleRoot = self.calc_merkle_root() def serialize(self): r = b"" r += super(CBlock, self).serialize() r += struct.pack("<BQ", 255, len(self.vtx)) for tx in self.vtx: r += tx.serialize() return r def normal_serialize(self): r = b"" r += super(CBrokenBlock, self).serialize() return r # genesis -> b1 (0) -> b2 (1) block(1, spend=out[0]) save_spendable_output() yield accepted() block(2, spend=out[1]) yield accepted() save_spendable_output() # so fork like this: # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) # # Nothing should happen at this point. We saw b2 first so it takes # priority. tip(1) b3 = block(3, spend=out[1]) txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0) yield rejected() # Now we add another block to make the alternative chain longer. # # genesis -> b1 (0) -> b2 (1) # \-> b3 (1) -> b4 (2) block(4, spend=out[2]) yield accepted() # ... and back to the first chain. # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b3 (1) -> b4 (2) tip(2) block(5, spend=out[2]) save_spendable_output() yield rejected() block(6, spend=out[3]) yield accepted() # Try to create a fork that double-spends # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b7 (2) -> b8 (4) # \-> b3 (1) -> b4 (2) tip(5) block(7, spend=out[2]) yield rejected() block(8, spend=out[4]) yield rejected() # Try to create a block that has too much fee # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b9 (4) # \-> b3 (1) -> b4 (2) tip(6) block(9, spend=out[4], additional_coinbase_value=1) yield rejected(RejectResult(16, b'bad-cb-amount')) # Create a fork that ends in a block with too much fee (the one that causes the reorg) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b10 (3) -> b11 (4) # \-> b3 (1) -> b4 (2) tip(5) block(10, spend=out[3]) yield rejected() block(11, spend=out[4], additional_coinbase_value=1) yield rejected(RejectResult(16, b'bad-cb-amount')) # Try again, but with a valid fork first # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b14 (5) # (b12 added last) # \-> b3 (1) -> b4 (2) tip(5) b12 = block(12, spend=out[3]) save_spendable_output() b13 = block(13, spend=out[4]) # Deliver the block header for b12, and the block b13. # b13 should be accepted but the tip won't advance until b12 is # delivered. yield TestInstance([[CBlockHeader(b12), None], [b13, False]]) save_spendable_output() # b14 is invalid, but the node won't know that until it tries to connect # Tip still can't advance because b12 is missing block(14, spend=out[5], additional_coinbase_value=1) yield rejected() yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
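# Aside (hedged): what the b12/b13 exchange above demonstrates. Once a header
# is known, block bodies may arrive out of order, but the tip only advances
# when every ancestor's body is present. A hypothetical sequence, borrowing
# send_header_for_blocks from feature_assumevalid.py's BaseNode and msg_block
# from test_framework.messages (neither is imported in this file):
def announce_then_send_out_of_order(p2p_conn, parent, child):
    p2p_conn.send_header_for_blocks([parent])  # header-only announcement
    p2p_conn.send_message(msg_block(child))    # child body first: stored only
    p2p_conn.send_message(msg_block(parent))   # tip can advance only now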
# Add a block with MAX_BLOCK_SIGOPS_PER_MB and one with one more sigop # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6) # \-> b3 (1) -> b4 (2) # Test that a block with a lot of checksigs is okay lots_of_checksigs = CScript( [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) tip(13) block(15, spend=out[5], script=lots_of_checksigs) yield accepted() save_spendable_output() # Test that a block with too many checksigs is rejected too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB)) block(16, spend=out[6], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Attempt to spend a transaction created on a different fork # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1]) # \-> b3 (1) -> b4 (2) tip(15) block(17, spend=txout_b3) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Attempt to spend a transaction created on a different fork (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b18 (b3.vtx[1]) -> b19 (6) # \-> b3 (1) -> b4 (2) tip(13) block(18, spend=txout_b3) yield rejected() block(19, spend=out[6]) yield rejected() # Attempt to spend a coinbase at depth too low # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7) # \-> b3 (1) -> b4 (2) tip(15) block(20, spend=out[7]) yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase')) # Attempt to spend a coinbase at depth too low (on a fork this time) # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) # \-> b21 (6) -> b22 (5) # \-> b3 (1) -> b4 (2) tip(13) block(21, spend=out[6]) yield rejected() block(22, spend=out[5]) yield rejected() # Create a block on either side of LEGACY_MAX_BLOCK_SIZE and make sure it's accepted/rejected # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) # \-> b24 (6) -> b25 (7) # \-> b3 (1) -> b4 (2) tip(15) b23 = block(23, spend=out[6]) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b23.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0))) b23 = update_block(23, [tx]) # Make sure the math above worked out to produce a max-sized block assert_equal(len(b23.serialize()), LEGACY_MAX_BLOCK_SIZE) yield accepted() save_spendable_output() # Create blocks with a coinbase input script size out of range # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3) # \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) # \-> ... (6) -> ... (7) # \-> b3 (1) -> b4 (2) tip(15) b26 = block(26, spend=out[6]) b26.vtx[0].vin[0].scriptSig = b'\x00' b26.vtx[0].rehash() # update_block causes the merkle root to get updated, even with no new # transactions, and updates the required state. b26 = update_block(26, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b26 chain to make sure bitcoind isn't accepting b26 block(27, spend=out[7]) yield rejected(False) # Now try a too-large-coinbase script tip(15) b28 = block(28, spend=out[6]) b28.vtx[0].vin[0].scriptSig = b'\x00' * 101 b28.vtx[0].rehash() b28 = update_block(28, []) yield rejected(RejectResult(16, b'bad-cb-length')) # Extend the b28 chain to make sure bitcoind isn't accepting b28 block(29, spend=out[7]) yield rejected(False) # b30 has a max-sized coinbase scriptSig.
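# Aside (hedged): the counting rule behind the b15/b16 checks above (and the
# multisig variants coming up) is the purely syntactic "legacy" count. A
# rough sketch with a hypothetical helper name:
def count_legacy_sigops(ops):
    # CHECKSIG(VERIFY) cost 1 sigop; CHECKMULTISIG(VERIFY) always cost 20,
    # the maximum pubkey count, no matter how many keys are actually used.
    total = 0
    for op in ops:
        if op in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
            total += 1
        elif op in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
            total += 20
    return total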
tip(23) b30 = block(30) b30.vtx[0].vin[0].scriptSig = b'\x00' * 100 b30.vtx[0].rehash() b30 = update_block(30, []) yield accepted() save_spendable_output() # b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY # # genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b36 (11) # \-> b34 (10) # \-> b32 (9) # # MULTISIG: each op code counts as 20 sigops. To create the edge case, # pack another 19 sigops at the end. lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ( (MAX_BLOCK_SIGOPS_PER_MB - 1) // 20) + [OP_CHECKSIG] * 19) b31 = block(31, spend=out[8], script=lots_of_multisigs) assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS_PER_MB) yield accepted() save_spendable_output() # this goes over the limit because the coinbase has one sigop too_many_multisigs = CScript( [OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS_PER_MB // 20)) b32 = block(32, spend=out[9], script=too_many_multisigs) assert_equal(get_legacy_sigopcount_block( b32), MAX_BLOCK_SIGOPS_PER_MB + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKMULTISIGVERIFY tip(31) lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ( (MAX_BLOCK_SIGOPS_PER_MB - 1) // 20) + [OP_CHECKSIG] * 19) block(33, spend=out[9], script=lots_of_multisigs) yield accepted() save_spendable_output() too_many_multisigs = CScript( [OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB // 20)) block(34, spend=out[10], script=too_many_multisigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # CHECKSIGVERIFY tip(33) lots_of_checksigs = CScript( [OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) b35 = block(35, spend=out[10], script=lots_of_checksigs) yield accepted() save_spendable_output() too_many_checksigs = CScript( [OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS_PER_MB)) block(36, spend=out[11], script=too_many_checksigs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Check spending of a transaction in a block which failed to connect # # b6 (3) # b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) # \-> b37 (11) # \-> b38 (11/37) # # save 37's spendable output, but then double-spend out11 to invalidate # the block tip(35) b37 = block(37, spend=out[11]) txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0) tx = create_and_sign_tx(out[11].tx, out[11].n, 0) b37 = update_block(37, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # attempt to spend b37's first non-coinbase tx, at which point b37 was # still considered valid tip(35) block(38, spend=txout_b37) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Check P2SH SigOp counting # # # 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12) # \-> b40 (12) # # b39 - create some P2SH outputs that will require 6 sigops to spend: # # redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG # p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL # tip(35) b39 = block(39) b39_outputs = 0 b39_sigops_per_output = 6 # Build the redeem script, hash it, use hash to create the p2sh script redeem_script = CScript([self.coinbase_pubkey] + [ OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG]) redeem_script_hash = hash160(redeem_script) p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) # Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE # This must be signed because it is spending a coinbase spend = out[11] tx = create_tx(spend.tx, spend.n, 1, p2sh_script) 
tx.vout.append( CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE]))) self.sign_tx(tx, spend.tx, spend.n) tx.rehash() b39 = update_block(39, [tx]) b39_outputs += 1 # Until block is full, add tx's with 1 satoshi to p2sh_script, the rest # to OP_TRUE tx_new = None tx_last = tx tx_last_n = len(tx.vout) - 1 total_size = len(b39.serialize()) while(total_size < LEGACY_MAX_BLOCK_SIZE): tx_new = create_tx(tx_last, tx_last_n, 1, p2sh_script) tx_new.vout.append( CTxOut(tx_last.vout[tx_last_n].nValue - 1, CScript([OP_TRUE]))) tx_new.rehash() total_size += len(tx_new.serialize()) if total_size >= LEGACY_MAX_BLOCK_SIZE: break b39.vtx.append(tx_new) # add tx to block tx_last = tx_new tx_last_n = len(tx_new.vout) - 1 b39_outputs += 1 b39 = update_block(39, []) yield accepted() save_spendable_output() # Test sigops in P2SH redeem scripts # # b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops. # The first tx has one sigop and then at the end we add 2 more to put us just over the max. # # b41 does the same, less one, so it has the maximum sigops permitted. # tip(39) b40 = block(40, spend=out[12]) sigops = get_legacy_sigopcount_block(b40) numTxs = (MAX_BLOCK_SIGOPS_PER_MB - sigops) // b39_sigops_per_output assert_equal(numTxs <= b39_outputs, True) lastOutpoint = COutPoint(b40.vtx[1].sha256, 0) lastAmount = b40.vtx[1].vout[0].nValue new_txs = [] for i in range(1, numTxs + 1): tx = CTransaction() tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) tx.vin.append(CTxIn(lastOutpoint, b'')) # second input is corresponding P2SH output from b39 tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b'')) # Note: must pass the redeem_script (not p2sh_script) to the # signature hash function sighash = SignatureHashForkId( redeem_script, tx, 1, SIGHASH_ALL | SIGHASH_FORKID, lastAmount) sig = self.coinbase_key.sign( sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID])) scriptSig = CScript([sig, redeem_script]) tx.vin[1].scriptSig = scriptSig pad_tx(tx) tx.rehash() new_txs.append(tx) lastOutpoint = COutPoint(tx.sha256, 0) lastAmount = tx.vout[0].nValue b40_sigops_to_fill = MAX_BLOCK_SIGOPS_PER_MB - \ (numTxs * b39_sigops_per_output + sigops) + 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill))) pad_tx(tx) tx.rehash() new_txs.append(tx) update_block(40, new_txs) yield rejected(RejectResult(16, b'bad-blk-sigops')) # same as b40, but one less sigop tip(39) block(41, spend=None) update_block(41, [b40tx for b40tx in b40.vtx[1:] if b40tx != tx]) b41_sigops_to_fill = b40_sigops_to_fill - 1 tx = CTransaction() tx.vin.append(CTxIn(lastOutpoint, b'')) tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill))) pad_tx(tx) update_block(41, [tx]) yield accepted() # Fork off of b39 to create a constant base again # # b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) # \-> b41 (12) # tip(39) block(42, spend=out[12]) yield rejected() save_spendable_output() block(43, spend=out[13]) yield accepted() save_spendable_output() # Test a number of really invalid scenarios # # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14) # \-> ??? (15) # The next few blocks are going to be created "by hand" since they'll do funky things, such as having # the first transaction be non-coinbase, etc. The purpose of b44 is to # make sure this works. 
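# Aside (hedged recap of the b39-b41 P2SH spends above): the scriptSig pushes
# the signature plus the serialized redeem script, and it is the redeem
# script's operators that are charged against the block sigop limit here.
# p2sh_scripts is a hypothetical helper, mirroring the code above:
def p2sh_scripts(redeem_script, sig):
    # scriptPubKey: OP_HASH160 <hash160(redeem_script)> OP_EQUAL
    script_pub_key = CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL])
    # scriptSig: <sig> <redeem script pushed as data>
    script_sig = CScript([sig, redeem_script])
    return script_pub_key, script_sig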
height = self.block_heights[self.tip.sha256] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) b44 = CBlock() b44.nTime = self.tip.nTime + 1 b44.hashPrevBlock = self.tip.sha256 b44.nBits = 0x207fffff b44.vtx.append(coinbase) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() self.tip = b44 self.block_heights[b44.sha256] = height self.blocks[44] = b44 yield accepted() # A block with a non-coinbase as the first tx non_coinbase = create_tx(out[15].tx, out[15].n, 1) b45 = CBlock() b45.nTime = self.tip.nTime + 1 b45.hashPrevBlock = self.tip.sha256 b45.nBits = 0x207fffff b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() b45.calc_sha256() b45.solve() self.block_heights[b45.sha256] = self.block_heights[ self.tip.sha256] + 1 self.tip = b45 self.blocks[45] = b45 yield rejected(RejectResult(16, b'bad-cb-missing')) # A block with no txns tip(44) b46 = CBlock() b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 b46.nBits = 0x207fffff b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() self.block_heights[b46.sha256] = self.block_heights[b44.sha256] + 1 self.tip = b46 assert 46 not in self.blocks self.blocks[46] = b46 s = ser_uint256(b46.hashMerkleRoot) yield rejected(RejectResult(16, b'bad-cb-missing')) # A block with invalid work tip(44) b47 = block(47, solve=False) target = uint256_from_compact(b47.nBits) while b47.sha256 < target: # changed > to < b47.nNonce += 1 b47.rehash() yield rejected(RejectResult(16, b'high-hash')) # A block with timestamp > 2 hrs in the future tip(44) b48 = block(48, solve=False) b48.nTime = int(time.time()) + 60 * 60 * 3 b48.solve() yield rejected(RejectResult(16, b'time-too-new')) # A block with an invalid merkle hash tip(44) b49 = block(49) b49.hashMerkleRoot += 1 b49.solve() yield rejected(RejectResult(16, b'bad-txnmrklroot')) # A block with an incorrect POW limit tip(44) b50 = block(50) b50.nBits = b50.nBits - 1 b50.solve() yield rejected(RejectResult(16, b'bad-diffbits')) # A block with two coinbase txns tip(44) b51 = block(51) cb2 = create_coinbase(51, self.coinbase_pubkey) b51 = update_block(51, [cb2]) yield rejected(RejectResult(16, b'bad-tx-coinbase')) # A block w/ duplicate txns tip(44) b52 = block(52, spend=out[15]) b52 = update_block(52, [b52.vtx[1]]) yield rejected(RejectResult(16, b'tx-duplicate')) # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) # tip(43) block(53, spend=out[14]) yield rejected() # rejected since b44 is at same height save_spendable_output() # invalid timestamp (b35 is 5 blocks back, so its time is # MedianTimePast) b54 = block(54, spend=out[15]) b54.nTime = b35.nTime - 1 b54.solve() yield rejected(RejectResult(16, b'time-too-old')) # valid timestamp tip(53) b55 = block(55, spend=out[15]) b55.nTime = b35.nTime update_block(55, []) yield accepted() save_spendable_output() # Test CVE-2012-2459 # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) # \-> b57 (16) # \-> b56p2 (16) # \-> b56 (16) # # Merkle tree malleability (CVE-2012-2459): repeating sequences of transactions in a block without # affecting the merkle root of a block, while still invalidating it. # See: src/consensus/merkle.h # # b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx. # Result: OK # # b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle # root but duplicate transactions. 
# Result: Fails # # b57p2 has six transactions in its merkle tree: # - coinbase, tx, tx1, tx2, tx3, tx4 # Merkle root calculation will duplicate as necessary. # Result: OK. # # b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches # duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates # that the error was caught early, avoiding a DOS vulnerability.) # b57 - a good block with 2 txs, don't submit until end tip(55) b57 = block(57) tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) b57 = update_block(57, [tx, tx1]) # b56 - copy b57, add a duplicate tx tip(55) b56 = copy.deepcopy(b57) self.blocks[56] = b56 assert_equal(len(b56.vtx), 3) b56 = update_block(56, [b57.vtx[2]]) assert_equal(b56.hash, b57.hash) yield rejected(RejectResult(16, b'bad-txns-duplicate')) # b57p2 - a good block with 6 tx'es, don't submit until end tip(55) b57p2 = block("57p2") tx = create_and_sign_tx(out[16].tx, out[16].n, 1) tx1 = create_tx(tx, 0, 1) tx2 = create_tx(tx1, 0, 1) tx3 = create_tx(tx2, 0, 1) tx4 = create_tx(tx3, 0, 1) b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4]) # b56p2 - copy b57p2, duplicate two non-consecutive tx's tip(55) b56p2 = copy.deepcopy(b57p2) self.blocks["b56p2"] = b56p2 assert_equal(len(b56p2.vtx), 6) b56p2 = update_block("b56p2", b56p2.vtx[4:6], reorder=False) assert_equal(b56p2.hash, b57p2.hash) yield rejected(RejectResult(16, b'bad-txns-duplicate')) tip("57p2") yield accepted() tip(57) yield rejected() # rejected because 57p2 seen first save_spendable_output() # Test a few invalid tx types # # -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> ??? (17) # # tx with prevout.n out of range tip(57) b58 = block(58, spend=out[17]) tx = CTransaction() assert(len(out[17].tx.vout) < 42) tx.vin.append( CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff)) tx.vout.append(CTxOut(0, b"")) pad_tx(tx) tx.calc_sha256() b58 = update_block(58, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # tx with output value > input value out of range tip(57) b59 = block(59) tx = create_and_sign_tx(out[17].tx, out[17].n, 51 * COIN) b59 = update_block(59, [tx]) yield rejected(RejectResult(16, b'bad-txns-in-belowout')) # reset to good chain tip(57) b60 = block(60, spend=out[17]) yield accepted() save_spendable_output() # Test BIP30 # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b61 (18) # # Blocks are not allowed to contain a transaction whose id matches that of an earlier, # not-fully-spent transaction in the same chain. To test, make identical coinbases; # the second one should be rejected. 
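# Aside (hedged model of the CVE-2012-2459 trick exercised by b56/b56p2
# above, before the BIP30 test below resumes): with an odd number of hashes
# in a merkle layer the last one is duplicated, so appending a copy of the
# trailing tx(s) leaves the root unchanged while making the tx list invalid.
from hashlib import sha256  # illustration only

def naive_merkle_root(txids):
    # txids: list of 32-byte hashes at the leaf layer.
    layer = list(txids)
    while len(layer) > 1:
        if len(layer) % 2:
            layer.append(layer[-1])  # the malleable duplication step
        layer = [sha256(sha256(a + b).digest()).digest()
                 for a, b in zip(layer[::2], layer[1::2])]
    return layer[0]
# naive_merkle_root([c, t1, t2]) == naive_merkle_root([c, t1, t2, t2]),
# which is exactly how b56 collides with b57.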
# tip(60) b61 = block(61, spend=out[18]) b61.vtx[0].vin[0].scriptSig = b60.vtx[ 0].vin[0].scriptSig # equalize the coinbases b61.vtx[0].rehash() b61 = update_block(61, []) assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize()) yield rejected(RejectResult(16, b'bad-txns-BIP30')) # Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests) # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b62 (18) # tip(60) b62 = block(62) tx = CTransaction() tx.nLockTime = 0xffffffff # this locktime is non-final assert(out[18].n < len(out[18].tx.vout)) tx.vin.append( CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) assert(tx.vin[0].nSequence < 0xffffffff) tx.calc_sha256() b62 = update_block(62, [tx]) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) # Test a non-final coinbase is also rejected # # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) # \-> b63 (-) # tip(60) b63 = block(63) b63.vtx[0].nLockTime = 0xffffffff b63.vtx[0].vin[0].nSequence = 0xDEADBEEF b63.vtx[0].rehash() b63 = update_block(63, []) yield rejected(RejectResult(16, b'bad-txns-nonfinal')) # This checks that a block with a bloated VARINT between the block_header and the array of tx such that # the block is > LEGACY_MAX_BLOCK_SIZE with the bloated varint, but <= LEGACY_MAX_BLOCK_SIZE without the bloated varint, # does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not # care whether the bloated block is accepted or rejected; it only cares that the second block is accepted. # # What matters is that the receiving node should not reject the bloated block, and then reject the canonical # block on the basis that it's the same as an already-rejected block (which would be a consensus failure.) 
# # -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) # \ # b64a (18) # b64a is a bloated block (non-canonical varint) # b64 is a good block (same as b64a but w/ canonical varint) # tip(60) regular_block = block("64a", spend=out[18]) # make it a "broken_block," with non-canonical serialization b64a = CBrokenBlock(regular_block) b64a.initialize(regular_block) self.blocks["64a"] = b64a self.tip = b64a tx = CTransaction() # use canonical serialization to calculate size script_length = LEGACY_MAX_BLOCK_SIZE - \ len(b64a.normal_serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0))) b64a = update_block("64a", [tx]) assert_equal(len(b64a.serialize()), LEGACY_MAX_BLOCK_SIZE + 8) yield TestInstance([[self.tip, None]]) # comptool workaround: to make sure b64 is delivered, manually erase # b64a from blockstore self.test.block_store.erase(b64a.sha256) tip(60) b64 = CBlock(b64a) b64.vtx = copy.deepcopy(b64a.vtx) assert_equal(b64.hash, b64a.hash) assert_equal(len(b64.serialize()), LEGACY_MAX_BLOCK_SIZE) self.blocks[64] = b64 update_block(64, []) yield accepted() save_spendable_output() # Spend an output created in the block itself # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # tip(64) block(65) tx1 = create_and_sign_tx( out[19].tx, out[19].n, out[19].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 0) update_block(65, [tx1, tx2]) yield accepted() save_spendable_output() # Attempt to double-spend a transaction created in a block # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) # \-> b67 (20) # # tip(65) block(67) tx1 = create_and_sign_tx( out[20].tx, out[20].n, out[20].tx.vout[0].nValue) tx2 = create_and_sign_tx(tx1, 0, 1) tx3 = create_and_sign_tx(tx1, 0, 2) update_block(67, [tx1, tx2, tx3]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # More tests of block subsidy # # -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b68 (20) # # b68 - coinbase with an extra 10 satoshis, # creates a tx that has 9 satoshis from out[20] go to fees # this fails because the coinbase is trying to claim 1 satoshi too much in fees # # b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee # this succeeds # tip(65) block(68, additional_coinbase_value=10) tx = create_and_sign_tx( out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 9) update_block(68, [tx]) yield rejected(RejectResult(16, b'bad-cb-amount')) tip(65) b69 = block(69, additional_coinbase_value=10) tx = create_and_sign_tx( out[20].tx, out[20].n, out[20].tx.vout[0].nValue - 10) update_block(69, [tx]) yield accepted() save_spendable_output() # Test spending the outpoint of a non-existent transaction # # -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) # \-> b70 (21) # tip(69) block(70, spend=out[21]) bogus_tx = CTransaction() bogus_tx.sha256 = uint256_from_str( b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c") tx = CTransaction() tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff)) tx.vout.append(CTxOut(1, b"")) pad_tx(tx) tx.rehash() update_block(70, [tx]) yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent')) # Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks) # # -> b53 (14) -> b55 (15) -> b57 (16)
-> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b71 (21) # # b72 is a good block. # b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72. # tip(69) b72 = block(72) tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2) tx2 = create_and_sign_tx(tx1, 0, 1) b72 = update_block(72, [tx1, tx2]) # now tip is 72 b71 = copy.deepcopy(b72) # add duplicate last transaction b71.vtx.append(b72.vtx[-1]) # b71 builds off b69 self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 self.blocks[71] = b71 assert_equal(len(b71.vtx), 4) assert_equal(len(b72.vtx), 3) assert_equal(b72.sha256, b71.sha256) tip(71) yield rejected(RejectResult(16, b'bad-txns-duplicate')) tip(72) yield accepted() save_spendable_output() # Test some invalid scripts and MAX_BLOCK_SIGOPS_PER_MB # # -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21) # \-> b** (22) # # b73 - tx with excessive sigops that are placed after an excessively large script element. # The purpose of the test is to make sure those sigops are counted. # # script is a bytearray of size 20,526 # # bytearray[0-19,998] : OP_CHECKSIG # bytearray[19,999] : OP_PUSHDATA4 # bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format) # bytearray[20,004-20,524]: unread data (script_element) # bytearray[20,525] : OP_CHECKSIG (this puts us over the limit) # tip(72) b73 = block(73) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + \ MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = int("4e", 16) # OP_PUSHDATA4 element_size = MAX_SCRIPT_ELEMENT_SIZE + 1 a[MAX_BLOCK_SIGOPS_PER_MB] = element_size % 256 a[MAX_BLOCK_SIGOPS_PER_MB + 1] = element_size // 256 a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0 a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0 tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b73 = update_block(73, [tx]) assert_equal( get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS_PER_MB + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # b74/75 - if we push an invalid script element, all previous sigops are counted, # but sigops after the element are not counted. # # The invalid script element is that the push_data indicates that # there will be a large amount of data (0xffffff bytes), but we only # provide a much smaller number. These bytes are CHECKSIGS so they would # cause b75 to fail for excessive sigops, if those bytes were counted.
# # b74 fails because we put MAX_BLOCK_SIGOPS_PER_MB+1 before the element # b75 succeeds because we put MAX_BLOCK_SIGOPS_PER_MB before the element # # tip(72) b74 = block(74) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + \ MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB] = 0x4e a[MAX_BLOCK_SIGOPS_PER_MB + 1] = 0xfe a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 4] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b74 = update_block(74, [tx]) yield rejected(RejectResult(16, b'bad-blk-sigops')) tip(72) b75 = block(75) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = 0x4e a[MAX_BLOCK_SIGOPS_PER_MB] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 1] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 2] = 0xff a[MAX_BLOCK_SIGOPS_PER_MB + 3] = 0xff tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a)) b75 = update_block(75, [tx]) yield accepted() save_spendable_output() # Check that if we push an element filled with CHECKSIGs, they are not # counted tip(75) b76 = block(76) size = MAX_BLOCK_SIGOPS_PER_MB - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 a = bytearray([OP_CHECKSIG] * size) a[MAX_BLOCK_SIGOPS_PER_MB - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a)) b76 = update_block(76, [tx]) yield accepted() save_spendable_output() # Test transaction resurrection # # -> b77 (24) -> b78 (25) -> b79 (26) # \-> b80 (25) -> b81 (26) -> b82 (27) # # b78 creates a tx, which is spent in b79. After b82, both should be in mempool # # The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the # rather obscure reason that the Python signature code does not distinguish between # Low-S and High-S values (whereas the bitcoin code has custom code which does so); # as a result of which, the odds are 50% that the python code will use the right # value and the transaction will be accepted into the mempool. Until we modify the # test framework to support low-S signing, we are out of luck. # # To get around this issue, we construct transactions which are not signed and which # spend to OP_TRUE. If the standard-ness rules change, this test would need to be # updated. (Perhaps to spend to a P2SH OP_TRUE script) # tip(76) block(77) tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10 * COIN) update_block(77, [tx77]) yield accepted() save_spendable_output() block(78) tx78 = create_tx(tx77, 0, 9 * COIN) update_block(78, [tx78]) yield accepted() block(79) tx79 = create_tx(tx78, 0, 8 * COIN) update_block(79, [tx79]) yield accepted() # mempool should be empty assert_equal(len(self.nodes[0].getrawmempool()), 0) tip(77) block(80, spend=out[25]) yield rejected() save_spendable_output() block(81, spend=out[26]) yield rejected() # other chain is same length save_spendable_output() block(82, spend=out[27]) yield accepted() # now this chain is longer, triggers re-org save_spendable_output() # now check that tx78 and tx79 have been put back into the peer's # mempool mempool = self.nodes[0].getrawmempool() assert_equal(len(mempool), 2) assert(tx78.hash in mempool) assert(tx79.hash in mempool) # Test invalid opcodes in dead execution paths. 
# # -> b81 (26) -> b82 (27) -> b83 (28) # block(83) op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF] script = CScript(op_codes) tx1 = create_and_sign_tx( out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script) tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE])) tx2.vin[0].scriptSig = CScript([OP_FALSE]) tx2.rehash() update_block(83, [tx1, tx2]) yield accepted() save_spendable_output() # Reorg on/off blocks that have OP_RETURN in them (and try to spend them) # # -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31) # \-> b85 (29) -> b86 (30) \-> b89a (32) # # block(84) tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN])) vout_offset = len(tx1.vout) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx1.calc_sha256() self.sign_tx(tx1, out[29].tx, out[29].n) tx1.rehash() tx2 = create_tx(tx1, vout_offset, 0, CScript([OP_RETURN])) tx2.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx3 = create_tx(tx1, vout_offset + 1, 0, CScript([OP_RETURN])) tx3.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx4 = create_tx(tx1, vout_offset + 2, 0, CScript([OP_TRUE])) tx4.vout.append(CTxOut(0, CScript([OP_RETURN]))) tx5 = create_tx(tx1, vout_offset + 3, 0, CScript([OP_RETURN])) update_block(84, [tx1, tx2, tx3, tx4, tx5]) yield accepted() save_spendable_output() tip(83) block(85, spend=out[29]) yield rejected() block(86, spend=out[30]) yield accepted() tip(84) block(87, spend=out[30]) yield rejected() save_spendable_output() block(88, spend=out[31]) yield accepted() save_spendable_output() # trying to spend the OP_RETURN output is rejected block("89a", spend=out[32]) tx = create_tx(tx1, 0, 0, CScript([OP_TRUE])) update_block("89a", [tx]) yield rejected() # Test re-org of a week's worth of blocks (1088 blocks) # This test takes a minute or two and can be accomplished in memory # if self.options.runbarelyexpensive: tip(88) LARGE_REORG_SIZE = 1088 test1 = TestInstance(sync_every_block=False) spend = out[32] for i in range(89, LARGE_REORG_SIZE + 89): b = block(i, spend) tx = CTransaction() script_length = LEGACY_MAX_BLOCK_SIZE - len(b.serialize()) - 69 script_output = CScript([b'\x00' * script_length]) tx.vout.append(CTxOut(0, script_output)) tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0))) b = update_block(i, [tx]) assert_equal(len(b.serialize()), LEGACY_MAX_BLOCK_SIZE) test1.blocks_and_transactions.append([self.tip, True]) save_spendable_output() spend = get_spendable_output() yield test1 chain1_tip = i # now create alt chain of same length tip(88) test2 = TestInstance(sync_every_block=False) for i in range(89, LARGE_REORG_SIZE + 89): block("alt" + str(i)) test2.blocks_and_transactions.append([self.tip, False]) yield test2 # extend alt chain to trigger re-org block("alt" + str(chain1_tip + 1)) yield accepted() # ... and re-org back to the first chain tip(chain1_tip) block(chain1_tip + 1) yield rejected() block(chain1_tip + 2) yield accepted() chain1_tip += 2 if __name__ == '__main__': FullBlockTest().main() diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py index 7d3ebd050..3cccf0361 100755 --- a/test/functional/feature_cltv.py +++ b/test/functional/feature_cltv.py @@ -1,246 +1,264 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test BIP65 (CHECKLOCKTIMEVERIFY). Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height 1351. """ +from test_framework.blocktools import create_block, create_coinbase +from test_framework.messages import ( + CTransaction, + FromHex, + msg_block, + msg_tx, + ToHex, +) +from test_framework.mininode import ( + mininode_lock, + network_thread_start, + P2PInterface, +) +from test_framework.script import ( + CScript, + CScriptNum, + OP_1NEGATE, + OP_CHECKLOCKTIMEVERIFY, + OP_DROP, + OP_TRUE, +) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import * -from test_framework.blocktools import create_coinbase, create_block -from test_framework.script import CScript, CScriptNum, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE from test_framework.txtools import pad_tx +from test_framework.util import assert_equal, wait_until CLTV_HEIGHT = 1351 # Reject codes that we might receive in this test REJECT_INVALID = 16 REJECT_OBSOLETE = 17 REJECT_NONSTANDARD = 64 def cltv_lock_to_height(node, tx, height=-1): '''Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY This transforms the script to anyone can spend (OP_TRUE) if the lock time condition is valid. Default height is -1 which leads CLTV to fail TODO: test more ways that transactions using CLTV could be invalid (eg locktime requirements fail, sequence time requirements fail, etc). ''' height_op = OP_1NEGATE if(height > 0): tx.vin[0].nSequence = 0 tx.nLockTime = height height_op = CScriptNum(height) tx.vout[0].scriptPubKey = CScript( [height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE]) tx.rehash() signed_result = node.signrawtransaction(ToHex(tx)) new_tx = FromHex(CTransaction(), signed_result['hex']) pad_tx(new_tx) new_tx.rehash() return new_tx def spend_from_coinbase(node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{"txid": from_txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx) tx = FromHex(CTransaction(), signresult['hex']) return tx class BIP65Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [ ['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] self.setup_clean_chain = True def run_test(self): self.nodes[0].add_p2p_connection(P2PInterface()) network_thread_start() # wait_for_verack ensures that the P2P connection is fully up. 
self.nodes[0].p2p.wait_for_verack() self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2)) self.coinbase_blocks = self.nodes[0].generate(CLTV_HEIGHT - 2) self.nodeaddress = self.nodes[0].getnewaddress() self.log.info( "Test that an invalid-according-to-CLTV transaction can still appear in a block") spendtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[0], self.nodeaddress, 50.0) spendtx = cltv_lock_to_height(self.nodes[0], spendtx) # Make sure the tx is valid self.nodes[0].sendrawtransaction(ToHex(spendtx)) tip = self.nodes[0].getbestblockhash() block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 block = create_block(int(tip, 16), create_coinbase( CLTV_HEIGHT - 1), block_time) block.nVersion = 3 block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) self.log.info("Test that blocks must now be at least version 4") tip = block.sha256 block_time += 1 block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time) block.nVersion = 3 block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) with mininode_lock: assert_equal( self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) assert_equal( self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000003)') assert_equal( self.nodes[0].p2p.last_message["reject"].data, block.sha256) del self.nodes[0].p2p.last_message["reject"] self.log.info( "Test that invalid-according-to-cltv transactions cannot appear in a block") block.nVersion = 4 spendtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 49.99) spendtx = cltv_lock_to_height(self.nodes[0], spendtx) # First we show that this tx is valid except for CLTV by getting it # accepted to the mempool (which we can achieve with # -promiscuousmempoolflags). 
self.nodes[0].p2p.send_and_ping(msg_tx(spendtx)) assert spendtx.hash in self.nodes[0].getrawmempool() # Mine a block containing the funding transaction block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is valid assert_equal(self.nodes[0].getbestblockhash(), block.hash) # But a block containing a transaction spending this utxo is not rawspendtx = self.nodes[0].decoderawtransaction(ToHex(spendtx)) inputs = [{ "txid": rawspendtx['txid'], "vout": rawspendtx['vout'][0]['n'] }] output = {self.nodeaddress: 49.98} rejectedtx_raw = self.nodes[0].createrawtransaction(inputs, output) rejectedtx_signed = self.nodes[0].signrawtransaction(rejectedtx_raw) # Couldn't complete signature due to CLTV assert(rejectedtx_signed['errors'][0]['error'] == 'Negative locktime') rejectedtx = FromHex(CTransaction(), rejectedtx_signed['hex']) pad_tx(rejectedtx) rejectedtx.rehash() tip = block.hash block_time += 1 block = create_block( block.sha256, create_coinbase(CLTV_HEIGHT+1), block_time) block.nVersion = 4 block.vtx.append(rejectedtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is invalid assert_equal(self.nodes[0].getbestblockhash(), tip) wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) with mininode_lock: assert self.nodes[0].p2p.last_message["reject"].code in [ REJECT_INVALID, REJECT_NONSTANDARD] assert_equal( self.nodes[0].p2p.last_message["reject"].data, block.sha256) if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: # Generic rejection when a block is invalid assert_equal( self.nodes[0].p2p.last_message["reject"].reason, b'blk-bad-inputs') else: assert b'Negative locktime' in self.nodes[0].p2p.last_message["reject"].reason self.log.info( "Test that a version 4 block with a valid-according-to-CLTV transaction is accepted") spendtx = spend_from_coinbase(self.nodes[0], self.coinbase_blocks[2], self.nodeaddress, 49.99) spendtx = cltv_lock_to_height(self.nodes[0], spendtx, CLTV_HEIGHT - 1) # Modify the transaction in the block to be valid against CLTV block.vtx.pop(1) block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is now valid assert_equal(self.nodes[0].getbestblockhash(), block.hash) # A block containing a transaction spending this utxo is also valid # Build this transaction rawspendtx = self.nodes[0].decoderawtransaction(ToHex(spendtx)) inputs = [{ "txid": rawspendtx['txid'], "vout": rawspendtx['vout'][0]['n'], "sequence": 0 }] output = {self.nodeaddress: 49.98} validtx_raw = self.nodes[0].createrawtransaction( inputs, output, CLTV_HEIGHT) validtx = FromHex(CTransaction(), validtx_raw) # Signrawtransaction won't sign a non standard tx. 
# But the prevout being anyone can spend, scriptsig can be left empty validtx.vin[0].scriptSig = CScript() pad_tx(validtx) validtx.rehash() tip = block.sha256 block_time += 1 block = create_block(tip, create_coinbase(CLTV_HEIGHT+3), block_time) block.nVersion = 4 block.vtx.append(validtx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) # This block is valid assert_equal(self.nodes[0].getbestblockhash(), block.hash) if __name__ == '__main__': BIP65Test().main() diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index 4544e291d..ecb3e9425 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -1,64 +1,64 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test various command line arguments and configuration file parameters.""" -import time import os +import time from test_framework.test_framework import BitcoinTestFramework from test_framework.util import get_datadir_path class ConfArgsTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): self.stop_node(0) # Remove the -datadir argument so it doesn't override the config file self.nodes[0].remove_default_args(["-datadir"]) default_data_dir = get_datadir_path(self.options.tmpdir, 0) new_data_dir = os.path.join(default_data_dir, 'newdatadir') new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2') # Check that using -datadir argument on non-existent directory fails self.nodes[0].datadir = new_data_dir self.assert_start_raises_init_error( 0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.') # Check that using non-existent datadir in conf file fails conf_file = os.path.join(default_data_dir, "bitcoin.conf") # datadir needs to be set before [regtest] section conf_file_contents = open(conf_file, encoding='utf8').read() with open(conf_file, 'w', encoding='utf8') as f: f.write("datadir=" + new_data_dir + "\n") f.write(conf_file_contents) self.assert_start_raises_init_error( 0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.') # Create the directory and ensure the config file now works os.mkdir(new_data_dir) self.start_node(0, ['-conf='+conf_file, '-wallet=w1']) time.sleep(5) self.stop_node(0) assert os.path.isfile(os.path.join( new_data_dir, 'regtest', 'wallets', 'w1')) # Ensure command line argument overrides datadir in conf os.mkdir(new_data_dir_2) self.nodes[0].datadir = new_data_dir_2 self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2']) assert os.path.isfile(os.path.join( new_data_dir_2, 'regtest', 'wallets', 'w2')) if __name__ == '__main__': ConfArgsTest().main() diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py index 16487d22a..e5fab0259 100755 --- a/test/functional/feature_csv_activation.py +++ b/test/functional/feature_csv_activation.py @@ -1,692 +1,704 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test activation of the first version bits soft fork. 
This soft fork will activate the following BIPS: BIP 68 - nSequence relative lock times BIP 112 - CHECKSEQUENCEVERIFY BIP 113 - MedianTimePast semantics for nLockTime regtest lock-in with 108/144 block signalling activation after a further 144 blocks mine 82 blocks whose coinbases will be used to generate inputs for our tests mine 489 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572 mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered mine 1 block and test that enforcement has triggered (which triggers ACTIVE) Test BIP 113 is enforced Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height Mine 1 block so next height is 581 and test BIP 68 now passes time but not height Mine 1 block so next height is 582 and test BIP 68 now passes time and height Test that BIP 112 is enforced Various transactions will be used to test that the BIPs' rules are not enforced before the soft fork activates, and that after the soft fork activates transactions pass and fail as they should according to the rules. For each BIP, transactions of versions 1 and 2 will be tested. ---------------- BIP 113: bip113tx - modify the nLocktime variable BIP 68: bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below BIP 112: bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112tx_special - test negative argument to OP_CSV """ -from test_framework.test_framework import ComparisonTestFramework -from test_framework.util import * -from test_framework.mininode import ToHex, FromHex, CTransaction, network_thread_start, COIN -from test_framework.blocktools import create_coinbase, create_block, make_conform_to_ctor -from test_framework.comptool import TestInstance, TestManager -from test_framework.script import * +from decimal import Decimal import time +from test_framework.blocktools import ( + create_block, + create_coinbase, + make_conform_to_ctor, +) +from test_framework.comptool import TestInstance, TestManager +from test_framework.messages import COIN, CTransaction, FromHex, ToHex +from test_framework.mininode import network_thread_start +from test_framework.script import ( + CScript, + OP_CHECKSEQUENCEVERIFY, + OP_DROP, + OP_TRUE, +) +from test_framework.test_framework import ComparisonTestFramework +from test_framework.util import assert_equal + base_relative_locktime = 10 seq_disable_flag = 1 << 31 seq_random_high_bit = 1 << 25 seq_type_flag = 1 << 22 seq_random_low_bit = 1 << 18 # b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field # relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
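# A sketch of how a node interprets these bits once BIP68 is enforced on version 2+
# transactions (simplified, illustration only; constant names follow BIP68, values as
# defined above):
def decode_bip68_sketch(n_sequence):
    if n_sequence & seq_disable_flag:
        return None  # relative lock-time disabled for this input
    value = n_sequence & 0x0000FFFF  # SEQUENCE_LOCKTIME_MASK
    if n_sequence & seq_type_flag:
        return ('time', value * 512)  # time-based lock, 512-second units
    return ('height', value)  # height-based lock, in blocks
# The two "random" bits (25 and 18) fall outside both the mask and the defined flags,
# so setting them must not change consensus behaviour; the 16 variants below test that.

relative_locktimes = [] for b31 in range(2): b25times = [] for b25 in range(2): b22times = [] for b22 in range(2): b18times = [] for b18 in range(2): rlt = base_relative_locktime if (b31): rlt = rlt | seq_disable_flag if (b25): rlt = rlt | seq_random_high_bit if (b22): rlt = rlt | seq_type_flag if (b18): rlt = rlt | seq_random_low_bit b18times.append(rlt)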
b22times.append(b18times) b25times.append(b22times) relative_locktimes.append(b25times) def all_rlt_txs(txarray): txs = [] for b31 in range(2): for b25 in range(2): for b22 in range(2): for b18 in range(2): txs.append(txarray[b31][b25][b22][b18]) return txs def get_csv_status(node): softforks = node.getblockchaininfo()['softforks'] for sf in softforks: if sf['id'] == 'csv' and sf['version'] == 5: return sf['reject']['status'] raise AssertionError('Cannot find CSV fork activation information') class BIP68_112_113Test(ComparisonTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4']] def run_test(self): test = TestManager(self, self.options.tmpdir) test.add_all_connections(self.nodes) network_thread_start() test.run() def send_generic_input_tx(self, node, coinbases): amount = Decimal("49.99") return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) def create_transaction(self, node, txid, to_address, amount): inputs = [{"txid": txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) tx = FromHex(CTransaction(), rawtx) return tx def sign_transaction(self, node, unsignedtx): rawtx = ToHex(unsignedtx) signresult = node.signrawtransaction(rawtx) tx = FromHex(CTransaction(), signresult['hex']) return tx def spend_tx(self, node, prev_tx): spendtx = self.create_transaction( node, prev_tx.hash, self.nodeaddress, (prev_tx.vout[0].nValue - 1000) / COIN) spendtx.nVersion = prev_tx.nVersion spendtx.rehash() return spendtx def generate_blocks(self, number): test_blocks = [] for i in range(number): block = self.create_test_block([]) test_blocks.append([block, True]) self.last_block_time += 600 self.tip = block.sha256 self.tipheight += 1 return test_blocks def create_test_block(self, txs, version=536870912): block = create_block(self.tip, create_coinbase( self.tipheight + 1), self.last_block_time + 600) block.nVersion = version block.vtx.extend(txs) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() return block # Create a block with given txs, and spend these txs in the same block. # Spending utxos in the same block is OK as long as nSequence is not enforced. # Otherwise a number of intermediate blocks should be generated, and this # method should not be used.
def create_test_block_spend_utxos(self, node, txs, version=536870912): block = self.create_test_block(txs, version) block.vtx.extend([self.spend_tx(node, tx) for tx in txs]) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() return block def create_bip68txs(self, bip68inputs, txversion, locktime_delta=0): txs = [] assert(len(bip68inputs) >= 16) i = 0 for b31 in range(2): b25txs = [] for b25 in range(2): b22txs = [] for b22 in range(2): b18txs = [] for b18 in range(2): tx = self.create_transaction( self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98")) i += 1 tx.nVersion = txversion tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + \ locktime_delta b18txs.append(self.sign_transaction(self.nodes[0], tx)) b22txs.append(b18txs) b25txs.append(b22txs) txs.append(b25txs) return txs def create_bip112special(self, input, txversion): tx = self.create_transaction( self.nodes[0], input, self.nodeaddress, Decimal("49.98")) tx.nVersion = txversion tx.vout[0].scriptPubKey = CScript( [-1, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) tx.rehash() signtx = self.sign_transaction(self.nodes[0], tx) signtx.rehash() return signtx def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta=0): txs = [] assert(len(bip112inputs) >= 16) i = 0 for b31 in range(2): b25txs = [] for b25 in range(2): b22txs = [] for b22 in range(2): b18txs = [] for b18 in range(2): tx = self.create_transaction( self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) i += 1 # if varying OP_CSV, nSequence is fixed if (varyOP_CSV): tx.vin[0].nSequence = base_relative_locktime + \ locktime_delta # vary nSequence instead, OP_CSV is fixed else: tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + \ locktime_delta tx.nVersion = txversion if (varyOP_CSV): tx.vout[0].scriptPubKey = CScript( [relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) else: tx.vout[0].scriptPubKey = CScript( [base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP, OP_TRUE]) tx.rehash() signtx = self.sign_transaction(self.nodes[0], tx) signtx.rehash() b18txs.append(signtx) b22txs.append(b18txs) b25txs.append(b22txs) txs.append(b25txs) return txs def get_tests(self): # Enough to build up to 1000 blocks 10 minutes apart without worrying # about getting into the future long_past_time = int(time.time()) - 600 * 1000 # Enough so that the generated blocks will still all be before long_past_time self.nodes[0].setmocktime(long_past_time - 100) # 82 blocks generated for inputs self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2 * 32 + 1) # Set time back to present so yielded blocks aren't in the future as # we advance last_block_time self.nodes[0].setmocktime(0) # height of the next block to build self.tipheight = 82 self.last_block_time = long_past_time self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.nodeaddress = self.nodes[0].getnewaddress() # CSV is not activated yet. assert_equal(get_csv_status(self.nodes[0]), False) # 489 more version 4 blocks test_blocks = self.generate_blocks(489) # Test #1 yield TestInstance(test_blocks, sync_every_block=False) # Still not activated. 
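# A sketch of the OP_CHECKSEQUENCEVERIFY rule (BIP112) that the bip112 transactions
# built below probe from every angle (simplified, illustration only; the real check
# lives in the node's script interpreter):
def csv_check_sketch(script_value, tx_version, tx_sequence):
    if script_value < 0:
        return False  # negative argument: the bip112tx_special case
    if script_value & seq_disable_flag:
        return True  # disable flag in the script operand bypasses the check
    if tx_version < 2 or (tx_sequence & seq_disable_flag):
        return False  # version 1 txs and disabled inputs always fail
    if (script_value ^ tx_sequence) & seq_type_flag:
        return False  # height lock compared against time lock (or vice versa)
    return (script_value & 0x0000FFFF) <= (tx_sequence & 0x0000FFFF)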
assert_equal(get_csv_status(self.nodes[0]), False) # Inputs at height = 572 # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) # Note we reuse inputs for v1 and v2 txs so must test these separately # 16 normal inputs bip68inputs = [] for i in range(16): bip68inputs.append(self.send_generic_input_tx( self.nodes[0], self.coinbase_blocks)) # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112basicinputs = [] for j in range(2): inputs = [] for i in range(16): inputs.append(self.send_generic_input_tx( self.nodes[0], self.coinbase_blocks)) bip112basicinputs.append(inputs) # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112diverseinputs = [] for j in range(2): inputs = [] for i in range(16): inputs.append(self.send_generic_input_tx( self.nodes[0], self.coinbase_blocks)) bip112diverseinputs.append(inputs) # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112specialinput = self.send_generic_input_tx( self.nodes[0], self.coinbase_blocks) # 1 normal input bip113input = self.send_generic_input_tx( self.nodes[0], self.coinbase_blocks) self.nodes[0].setmocktime(self.last_block_time + 600) # 1 block generated for inputs to be in chain at height 572 inputblockhash = self.nodes[0].generate(1)[0] self.nodes[0].setmocktime(0) self.tip = int("0x" + inputblockhash, 0) self.tipheight += 1 self.last_block_time += 600 assert_equal(len(self.nodes[0].getblock( inputblockhash, True)["tx"]), 82 + 1) # 2 more version 4 blocks test_blocks = self.generate_blocks(2) # Test #2 yield TestInstance(test_blocks, sync_every_block=False) # Not yet activated, height = 574 (will activate for block 576, not 575) assert_equal(get_csv_status(self.nodes[0]), False) # Test both version 1 and version 2 transactions for all tests # BIP113 test transaction will be modified before each use to # put in appropriate block time bip113tx_v1 = self.create_transaction( self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE bip113tx_v1.nVersion = 1 bip113tx_v2 = self.create_transaction( self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE bip113tx_v2.nVersion = 2 # For BIP68 test all 16 relative sequence locktimes bip68txs_v1 = self.create_bip68txs(bip68inputs, 1) bip68txs_v2 = self.create_bip68txs(bip68inputs, 2) # For BIP112 test: # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs bip112txs_vary_nSequence_v1 = self.create_bip112txs( bip112basicinputs[0], False, 1) bip112txs_vary_nSequence_v2 = self.create_bip112txs( bip112basicinputs[0], False, 2) # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs bip112txs_vary_nSequence_9_v1 = self.create_bip112txs( bip112basicinputs[1], False, 1, -1) bip112txs_vary_nSequence_9_v2 = self.create_bip112txs( bip112basicinputs[1], False, 2, -1) # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs bip112txs_vary_OP_CSV_v1 = self.create_bip112txs( bip112diverseinputs[0], True, 1) bip112txs_vary_OP_CSV_v2 = self.create_bip112txs( bip112diverseinputs[0], True, 2) # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs( bip112diverseinputs[1], True, 1, -1) bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs( bip112diverseinputs[1], True, 2, -1) # -1 OP_CSV 
OP_DROP input bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1) bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2) ### TESTING ### ################################## ### Before Soft Forks Activate ### ################################## # All txs should pass ### Version 1 txs ### success_txs = [] # add BIP113 tx and -1 CSV tx # = MTP of prior block (not <) but < time put on current block bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) success_txs.append(bip113signed1) success_txs.append(bip112tx_special_v1) success_txs.append(self.spend_tx(self.nodes[0], bip112tx_special_v1)) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v1)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) success_txs.extend([self.spend_tx(self.nodes[0], tx) for tx in all_rlt_txs(bip112txs_vary_nSequence_v1)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) success_txs.extend([self.spend_tx(self.nodes[0], tx) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v1)]) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) success_txs.extend([self.spend_tx(self.nodes[0], tx) for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v1)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) success_txs.extend([self.spend_tx(self.nodes[0], tx) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)]) # Test #3 yield TestInstance([[self.create_test_block(success_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) ### Version 2 txs ### success_txs = [] # add BIP113 tx and -1 CSV tx # = MTP of prior block (not <) but < time put on current block bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) success_txs.append(bip113signed2) success_txs.append(bip112tx_special_v2) success_txs.append(self.spend_tx(self.nodes[0], bip112tx_special_v2)) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v2)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) success_txs.extend([self.spend_tx(self.nodes[0], tx) for tx in all_rlt_txs(bip112txs_vary_nSequence_v2)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) success_txs.extend([self.spend_tx(self.nodes[0], tx) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_v2)]) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) success_txs.extend([self.spend_tx(self.nodes[0], tx) for tx in all_rlt_txs(bip112txs_vary_nSequence_9_v2)]) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) success_txs.extend([self.spend_tx(self.nodes[0], tx) for tx in all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)]) # Test #4 yield TestInstance([[self.create_test_block(success_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # 1 more version 4 block to get us to height 575 so the fork should # now be active for the next block test_blocks = self.generate_blocks(1) # Test #5 yield TestInstance(test_blocks, sync_every_block=False) assert_equal(get_csv_status(self.nodes[0]), False) self.nodes[0].generate(1) assert_equal(get_csv_status(self.nodes[0]), True) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) ################################# ### After Soft Forks Activate ### ################################# ### BIP 113 ### # BIP 113 tests should now fail regardless of version number # if nLockTime isn't satisfied by 
new rules # = MTP of prior block (not <) but < time put on current block bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) # = MTP of prior block (not <) but < time put on current block bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: # Test #6, Test #7 yield TestInstance([[self.create_test_block([bip113tx]), False]]) # BIP 113 tests should now pass if the locktime is < MTP # < MTP of prior block bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) # < MTP of prior block bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: # Test #8, Test #9 yield TestInstance([[self.create_test_block([bip113tx]), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Next block height = 580 after 4 blocks of random version test_blocks = self.generate_blocks(4) # Test #10 yield TestInstance(test_blocks, sync_every_block=False) ### BIP 68 ### ### Version 1 txs ### # All still pass success_txs = [] success_txs.extend(all_rlt_txs(bip68txs_v1)) # Test #11 yield TestInstance([[self.create_test_block(success_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) ### Version 2 txs ### bip68success_txs = [] # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass for b25 in range(2): for b22 in range(2): for b18 in range(2): bip68success_txs.append(bip68txs_v2[1][b25][b22][b18]) # Test #12 yield TestInstance([[self.create_test_block(bip68success_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # All txs without flag fail as we are at delta height = 8 < 10 and # delta time = 8 * 600 < 10 * 512 bip68timetxs = [] for b25 in range(2): for b18 in range(2): bip68timetxs.append(bip68txs_v2[0][b25][1][b18]) for tx in bip68timetxs: # Test #13 - Test #16 yield TestInstance([[self.create_test_block([tx]), False]]) bip68heighttxs = [] for b25 in range(2): for b18 in range(2): bip68heighttxs.append(bip68txs_v2[0][b25][0][b18]) for tx in bip68heighttxs: # Test #17 - Test #20 yield TestInstance([[self.create_test_block([tx]), False]]) # Advance one block to 581 test_blocks = self.generate_blocks(1) # Test #21 yield TestInstance(test_blocks, sync_every_block=False) # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512 bip68success_txs.extend(bip68timetxs) # Test #22 yield TestInstance([[self.create_test_block(bip68success_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) for tx in bip68heighttxs: # Test #23 - Test #26 yield TestInstance([[self.create_test_block([tx]), False]]) # Advance one block to 582 test_blocks = self.generate_blocks(1) # Test #27 yield TestInstance(test_blocks, sync_every_block=False) # All BIP 68 txs should pass bip68success_txs.extend(bip68heighttxs) # Test #28 yield TestInstance([[self.create_test_block(bip68success_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) ### BIP 112 ### ### Version 1 txs ### # -1 OP_CSV tx should fail # Test #29 yield TestInstance([[self.create_test_block_spend_utxos(self.nodes[0], [bip112tx_special_v1]), False]]) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, # version 1 txs should still pass success_txs = [] for b25 in range(2): for b22 in 
range(2): for b18 in range(2): success_txs.append( bip112txs_vary_OP_CSV_v1[1][b25][b22][b18]) success_txs.append( bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18]) # Test #30 yield TestInstance([[self.create_test_block_spend_utxos(self.nodes[0], success_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, # version 1 txs should now fail fail_txs = [] fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) for b25 in range(2): for b22 in range(2): for b18 in range(2): fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18]) fail_txs.append( bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18]) for tx in fail_txs: # Test #31 - Test #78 yield TestInstance([[self.create_test_block_spend_utxos(self.nodes[0], [tx]), False]]) ### Version 2 txs ### # -1 OP_CSV tx should fail # Test #79 yield TestInstance([[self.create_test_block_spend_utxos(self.nodes[0], [bip112tx_special_v2]), False]]) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, # version 2 txs should pass success_txs = [] for b25 in range(2): for b22 in range(2): for b18 in range(2): # 8/16 of vary_OP_CSV success_txs.append( bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9 success_txs.append( bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # Test #80 yield TestInstance([[self.create_test_block_spend_utxos(self.nodes[0], success_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## # All txs with nSequence 9 should fail either due to earlier mismatch # or failing the CSV check fail_txs = [] # 16/16 of vary_nSequence_9 fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) for b25 in range(2): for b22 in range(2): for b18 in range(2): # 16/16 of vary_OP_CSV_9 fail_txs.append( bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) for tx in fail_txs: # Test #81 - Test #104 yield TestInstance([[self.create_test_block_spend_utxos(self.nodes[0], [tx]), False]]) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail fail_txs = [] for b25 in range(2): for b22 in range(2): for b18 in range(2): # 8/16 of vary_nSequence fail_txs.append( bip112txs_vary_nSequence_v2[1][b25][b22][b18]) for tx in fail_txs: # Test #105 - Test #112 yield TestInstance([[self.create_test_block_spend_utxos(self.nodes[0], [tx]), False]]) # If sequencelock types mismatch, tx should fail fail_txs = [] for b25 in range(2): for b18 in range(2): # 12/16 of vary_nSequence fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) for tx in fail_txs: # Test #113 - Test #120 yield TestInstance([[self.create_test_block_spend_utxos(self.nodes[0], [tx]), False]]) # Remaining txs should pass, just test masking works properly success_txs = [] for b25 in range(2): for b18 in range(2): # 16/16 of vary_nSequence success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # Test #121 yield TestInstance([[self.create_test_block(success_txs), True]]) # Spending the previous block utxos requires a difference of 10 blocks (nSequence = 10). 
# Generate 9 blocks then spend in the 10th block = self.nodes[0].getbestblockhash() self.last_block_time += 600 self.tip = int("0x" + block, 0) self.tipheight += 1 # Test #122 yield TestInstance(self.generate_blocks(9), sync_every_block=False) spend_txs = [] for tx in success_txs: raw_tx = self.spend_tx(self.nodes[0], tx) raw_tx.vin[0].nSequence = base_relative_locktime raw_tx.rehash() spend_txs.append(raw_tx) # Test #123 yield TestInstance([[self.create_test_block(spend_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Additional test, of checking that comparison of two time types works properly time_txs = [] for b25 in range(2): for b18 in range(2): tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18] signtx = self.sign_transaction(self.nodes[0], tx) time_txs.append(signtx) # Test #124 yield TestInstance([[self.create_test_block(time_txs), True]]) # Spending the previous block utxos requires a block time difference of # at least 10 * 512s (nSequence = 10). # Generate 8 blocks then spend in the 9th (9 * 600 > 10 * 512) block = self.nodes[0].getbestblockhash() self.last_block_time += 600 self.tip = int("0x" + block, 0) self.tipheight += 1 # Test #125 yield TestInstance(self.generate_blocks(8), sync_every_block=False) spend_txs = [] for tx in time_txs: raw_tx = self.spend_tx(self.nodes[0], tx) raw_tx.vin[0].nSequence = base_relative_locktime | seq_type_flag raw_tx.rehash() spend_txs.append(raw_tx) # Test #126 yield TestInstance([[self.create_test_block(spend_txs), True]]) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Missing aspects of test # Testing empty stack fails if __name__ == '__main__': BIP68_112_113Test().main() diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index e864345a1..8931cd293 100755 --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -1,310 +1,316 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test recovery from a crash during chainstate writing. - 4 nodes * node0, node1, and node2 will have different dbcrash ratios, and different dbcache sizes * node3 will be a regular node, with no crashing. * The nodes will not connect to each other. - use default test framework starting chain. initialize starting_tip_height to tip height. - Main loop: * generate lots of transactions on node3, enough to fill up a block. * uniformly randomly pick a tip height from starting_tip_height to tip_height; with probability 1/(height_difference+4), invalidate this block. * mine enough blocks to overtake tip_height at start of loop. 
* for each node in [node0,node1,node2]: - for each mined block: * submit block to node * if node crashed on/after submitting: - restart until recovery succeeds - check that utxo matches node3 using gettxoutsetinfo""" import errno import http.client import random import sys import time -from test_framework.mininode import * -from test_framework.script import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * from test_framework.blocktools import create_confirmed_utxos +from test_framework.messages import ( + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + ToHex, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, hex_str_to_bytes HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest] try: HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected) except AttributeError: pass class ChainstateWriteCrashTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 self.setup_clean_chain = False # Set -maxmempool=0 to turn off mempool memory sharing with dbcache # Set -rpcservertimeout=900 to reduce socket disconnects in this # long-running test self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000", "-noparkdeepreorg"] # Set different crash ratios and cache sizes. Note that not all of # -dbcache goes to pcoinsTip. self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args # Node3 is a normal node with default args, except will mine full blocks self.node3_args = ["-blockmaxsize=32000000"] self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args] def setup_network(self): # Need a bit of extra time for the nodes to start up for this test self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=90) self.start_nodes() # Leave them unconnected, we'll use submitblock directly in this test def restart_node(self, node_index, expected_tip): """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash. Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up after 120 seconds. Returns the utxo hash of the given node.""" time_start = time.time() while time.time() - time_start < 120: try: # Any of these RPC calls could throw due to node crash self.start_node(node_index) self.nodes[node_index].waitforblock(expected_tip) utxo_hash = self.nodes[node_index].gettxoutsetinfo()[ 'hash_serialized'] return utxo_hash except: # An exception here should mean the node is about to crash. # If bitcoind exits, then try again. wait_for_node_exit() # should raise an exception if bitcoind doesn't exit. self.wait_for_node_exit(node_index, timeout=15) self.crashed_on_restart += 1 time.sleep(1) # If we got here, bitcoind isn't coming back up on restart. Could be a # bug in bitcoind, or we've gotten unlucky with our dbcrash ratio -- # perhaps we generated a test case that blew up our cache? # TODO: If this happens a lot, we should try to restart without -dbcrashratio # and make sure that recovery happens. raise AssertionError( "Unable to successfully restart node {} in allotted time".format(node_index)) def submit_block_catch_error(self, node_index, block): """Try submitting a block to the given node. Catch any exceptions that indicate the node has crashed.
Returns true if the block was submitted successfully; false otherwise.""" try: self.nodes[node_index].submitblock(block) return True except http.client.BadStatusLine as e: # Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error. if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''": self.log.debug( "node {} submitblock raised exception: {}".format(node_index, e)) return False else: raise except tuple(HTTP_DISCONNECT_ERRORS) as e: self.log.debug( "node {} submitblock raised exception: {}".format(node_index, e)) return False except OSError as e: self.log.debug( "node {} submitblock raised OSError exception: errno={}".format(node_index, e.errno)) if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]: # The node has likely crashed return False else: # Unexpected exception, raise raise def sync_node3blocks(self, block_hashes): """Use submitblock to sync node3's chain with the other nodes. If submitblock fails, restart the node and get the new utxo hash. If any nodes crash while updating, we'll compare utxo hashes to ensure recovery was successful.""" node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized'] # Retrieve all the blocks from node3 blocks = [] for block_hash in block_hashes: blocks.append( [block_hash, self.nodes[3].getblock(block_hash, False)]) # Deliver each block to each other node for i in range(3): nodei_utxo_hash = None self.log.debug("Syncing blocks to node {}".format(i)) for (block_hash, block) in blocks: # Get the block from node3, and submit to node_i self.log.debug("submitting block {}".format(block_hash)) if not self.submit_block_catch_error(i, block): # TODO: more carefully check that the crash is due to -dbcrashratio # (change the exit code perhaps, and check that here?) self.wait_for_node_exit(i, timeout=30) self.log.debug( "Restarting node {} after block hash {}".format(i, block_hash)) nodei_utxo_hash = self.restart_node(i, block_hash) assert nodei_utxo_hash is not None self.restart_counts[i] += 1 else: # Clear it out after successful submitblock calls -- the cached # utxo hash will no longer be correct nodei_utxo_hash = None # Check that the utxo hash matches node3's utxo set # NOTE: we only check the utxo set if we had to restart the node # after the last block submitted: # - checking the utxo hash causes a cache flush, which we don't # want to do every time; so # - we only update the utxo cache after a node restart, since flushing # the cache is a no-op at that point if nodei_utxo_hash is not None: self.log.debug( "Checking txoutsetinfo matches for node {}".format(i)) assert_equal(nodei_utxo_hash, node3_utxo_hash) def verify_utxo_hash(self): """Verify that the utxo hash of each node matches node3.
Restart any nodes that crash while querying.""" node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized'] self.log.info("Verifying utxo hash matches for all nodes") for i in range(3): try: nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()[ 'hash_serialized'] except OSError: # probably a crash on db flushing nodei_utxo_hash = self.restart_node( i, self.nodes[3].getbestblockhash()) assert_equal(nodei_utxo_hash, node3_utxo_hash) def generate_small_transactions(self, node, count, utxo_list): FEE = 1000 # TODO: replace this with node relay fee based calculation num_transactions = 0 random.shuffle(utxo_list) while len(utxo_list) >= 2 and num_transactions < count: tx = CTransaction() input_amount = 0 for i in range(2): utxo = utxo_list.pop() tx.vin.append( CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))) input_amount += int(utxo['amount'] * COIN) output_amount = (input_amount - FEE) // 3 if output_amount <= 0: # Sanity check -- if we chose inputs that are too small, skip continue for i in range(3): tx.vout.append( CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) # Sign and send the transaction to get into the mempool tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex'] node.sendrawtransaction(tx_signed_hex) num_transactions += 1 def run_test(self): # Track test coverage statistics self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2 self.crashed_on_restart = 0 # Track count of crashes during recovery # Start by creating a lot of utxos on node3 initial_height = self.nodes[3].getblockcount() utxo_list = create_confirmed_utxos(self.nodes[3], 5000) self.log.info("Prepped {} utxo entries".format(len(utxo_list))) # Sync these blocks with the other nodes block_hashes_to_sync = [] for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1): block_hashes_to_sync.append(self.nodes[3].getblockhash(height)) self.log.debug("Syncing {} blocks with other nodes".format( len(block_hashes_to_sync))) # Syncing the blocks could cause nodes to crash, so the test begins here. self.sync_node3blocks(block_hashes_to_sync) starting_tip_height = self.nodes[3].getblockcount() # Main test loop: # each time through the loop, generate a bunch of transactions, # and then either mine a single new block on the tip, or some-sized reorg. for i in range(40): self.log.info( "Iteration {}, generating 2500 transactions {}".format( i, self.restart_counts)) # Generate a bunch of small-ish transactions self.generate_small_transactions(self.nodes[3], 2500, utxo_list) # Pick a random block between current tip, and starting tip current_height = self.nodes[3].getblockcount() random_height = random.randint(starting_tip_height, current_height) self.log.debug("At height {}, considering height {}".format( current_height, random_height)) if random_height > starting_tip_height: # Randomly reorg from this point with some probability (1/4 for # tip, 1/5 for tip-1, ...) 
if random.random() < 1.0 / (current_height + 4 - random_height): self.log.debug( "Invalidating block at height {}".format(random_height)) self.nodes[3].invalidateblock( self.nodes[3].getblockhash(random_height)) # Now generate new blocks until we pass the old tip height self.log.debug("Mining longer tip") block_hashes = [] while current_height + 1 > self.nodes[3].getblockcount(): block_hashes.extend(self.nodes[3].generate( min(10, current_height + 1 - self.nodes[3].getblockcount()))) self.log.debug( "Syncing {} new blocks...".format(len(block_hashes))) self.sync_node3blocks(block_hashes) utxo_list = self.nodes[3].listunspent() self.log.debug("Node3 utxo count: {}".format(len(utxo_list))) # Check that the utxo hashes agree with node3 # Useful side effect: each utxo cache gets flushed here, so that we # won't get crashes on shutdown at the end of the test. self.verify_utxo_hash() # Check the test coverage self.log.info("Restarted nodes: {}; crashes on restart: {}".format( self.restart_counts, self.crashed_on_restart)) # If no nodes were restarted, we didn't test anything. assert self.restart_counts != [0, 0, 0] # Make sure we tested the case of crash-during-recovery. assert self.crashed_on_restart > 0 # Warn if any of the nodes escaped restart. for i in range(3): if self.restart_counts[i] == 0: self.log.warn( "Node {} never crashed during utxo flush!".format(i)) if __name__ == "__main__": ChainstateWriteCrashTest().main() diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py index 8dc7ee6d9..a9f399306 100755 --- a/test/functional/feature_dersig.py +++ b/test/functional/feature_dersig.py @@ -1,142 +1,147 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test BIP66 (DER SIG). Test that the DERSIG soft-fork activates at (regtest) height 1251. """ -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.mininode import * -from test_framework.blocktools import create_coinbase, create_block +from test_framework.blocktools import create_block, create_coinbase +from test_framework.messages import CTransaction, FromHex, msg_block +from test_framework.mininode import ( + mininode_lock, + network_thread_start, + P2PInterface, +) from test_framework.script import CScript +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, wait_until DERSIG_HEIGHT = 1251 # Reject codes that we might receive in this test REJECT_INVALID = 16 REJECT_OBSOLETE = 17 REJECT_NONSTANDARD = 64 # A canonical signature consists of: # <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
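# A sketch of a checker for that layout (simplified BIP66 shape, illustration only;
# range checks on R and S values omitted):
def is_canonical_der_sketch(sig):
    if len(sig) < 9 or sig[0] != 0x30 or sig[1] != len(sig) - 3:
        return False
    len_r = sig[3]
    if sig[2] != 0x02 or len_r == 0 or 5 + len_r >= len(sig):
        return False
    len_s = sig[5 + len_r]
    if sig[4 + len_r] != 0x02 or len_s == 0:
        return False
    return len(sig) == 7 + len_r + len_s  # header, lengths, R, S, plus one hashtype byte

def unDERify(tx): """ Make the signature in vin 0 of a tx non-DER-compliant, by adding padding after the S-value.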
""" scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): newscript.append(i[0:-1] + b'\0' + i[-1:]) else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) def create_transaction(node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{"txid": from_txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx) return FromHex(CTransaction(), signresult['hex']) class BIP66Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [ ['-promiscuousmempoolflags=1', '-whitelist=127.0.0.1']] self.setup_clean_chain = True def run_test(self): self.nodes[0].add_p2p_connection(P2PInterface()) network_thread_start() # wait_for_verack ensures that the P2P connection is fully up. self.nodes[0].p2p.wait_for_verack() self.log.info("Mining {} blocks".format(DERSIG_HEIGHT - 1)) self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 1) self.nodeaddress = self.nodes[0].getnewaddress() self.log.info("Test that blocks must now be at least version 3") tip = self.nodes[0].getbestblockhash() block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1 block = create_block( int(tip, 16), create_coinbase(DERSIG_HEIGHT), block_time) block.nVersion = 2 block.rehash() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) with mininode_lock: assert_equal( self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE) assert_equal( self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)') assert_equal( self.nodes[0].p2p.last_message["reject"].data, block.sha256) del self.nodes[0].p2p.last_message["reject"] self.log.info( "Test that transactions with non-DER signatures cannot appear in a block") block.nVersion = 3 spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) unDERify(spendtx) spendtx.rehash() # Now we verify that a block with this transaction is invalid. block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(self.nodes[0].getbestblockhash(), tip) wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock) with mininode_lock: # We can receive different reject messages depending on whether # bitcoind is running with multiple script check threads. If script # check threads are not in use, then transaction script validation # happens sequentially, and bitcoind produces more specific reject # reasons. 
assert self.nodes[0].p2p.last_message["reject"].code in [ REJECT_INVALID, REJECT_NONSTANDARD] assert_equal( self.nodes[0].p2p.last_message["reject"].data, block.sha256) if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID: # Generic rejection when a block is invalid assert_equal( self.nodes[0].p2p.last_message["reject"].reason, b'blk-bad-inputs') else: assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason self.log.info( "Test that a version 3 block with a DERSIG-compliant transaction is accepted") block.vtx[1] = create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].p2p.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256) if __name__ == '__main__': BIP66Test().main() diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py index 80c11fd4b..4027a751c 100755 --- a/test/functional/feature_maxuploadtarget.py +++ b/test/functional/feature_maxuploadtarget.py @@ -1,183 +1,184 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. ''' Test behavior of -maxuploadtarget. * Verify that getdata requests for old blocks (>1week) are dropped if uploadtarget has been reached. * Verify that getdata requests for recent blocks are respected even if uploadtarget has been reached. * Verify that the upload counters are reset after 24 hours. ''' from collections import defaultdict import time -from test_framework.mininode import * -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE from test_framework.blocktools import mine_big_block +from test_framework.messages import CInv, msg_getdata +from test_framework.mininode import network_thread_start, P2PInterface +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal class TestNode(P2PInterface): def __init__(self): super().__init__() self.block_receive_map = defaultdict(int) def on_inv(self, message): pass def on_block(self, message): message.block.calc_sha256() self.block_receive_map[message.block.sha256] += 1 class MaxUploadTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 # Start a node with maxuploadtarget of 200 MB (/24h) self.extra_args = [["-maxuploadtarget=200"]] # Cache for utxos, as the listunspent may take a long time later in the # test self.utxo_cache = [] def run_test(self): # Before we connect anything, we first set the time on the node # to be in the past, otherwise things break because the CNode # time counters can't be reset backward after initialization old_time = int(time.time() - 2 * 60 * 60 * 24 * 7) self.nodes[0].setmocktime(old_time) # Generate some old blocks self.nodes[0].generate(130) # p2p_conns[0] will only request old blocks # p2p_conns[1] will only request new blocks # p2p_conns[2] will test resetting the counters p2p_conns = [] for _ in range(3): p2p_conns.append(self.nodes[0].add_p2p_connection(TestNode())) network_thread_start() for p2pc in p2p_conns: p2pc.wait_for_verack() # Test logic begins here # Now mine a big block mine_big_block(self.nodes[0], self.utxo_cache)
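# For reference, the budget arithmetic the assertions below rely on, as a small
# sketch (assumes the 1,000,000-byte LEGACY_MAX_BLOCK_SIZE from test_framework.cdefs;
# illustration only):
def old_block_budget_sketch(upload_target_mb, old_block_size):
    max_bytes_per_day = upload_target_mb * 1024 * 1024  # -maxuploadtarget is in MiB per day
    daily_buffer = 144 * 1000000  # room is always kept for serving 144 new blocks a day
    return (max_bytes_per_day - daily_buffer) // old_block_size
# Assuming the big block mined above comes out around 0.94 MB,
# old_block_budget_sketch(200, 940000) is roughly 69, which matches the
# "~70 tries" expectation noted further down.

# Store the hash; we'll request this later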
big_old_block = self.nodes[0].getbestblockhash() old_block_size = self.nodes[0].getblock(big_old_block, True)['size'] big_old_block = int(big_old_block, 16) # Advance to two days ago self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24) # Mine one more block, so that the prior block looks old mine_big_block(self.nodes[0], self.utxo_cache) # We'll be requesting this new block too big_new_block = self.nodes[0].getbestblockhash() big_new_block = int(big_new_block, 16) # p2p_conns[0] will test what happens if we just keep requesting the # same big old block too many times (expect: disconnect) getdata_request = msg_getdata() getdata_request.inv.append(CInv(2, big_old_block)) max_bytes_per_day = 200 * 1024 * 1024 daily_buffer = 144 * LEGACY_MAX_BLOCK_SIZE max_bytes_available = max_bytes_per_day - daily_buffer success_count = max_bytes_available // old_block_size # 144MB will be reserved for relaying new blocks, so expect this to # succeed for ~70 tries. for i in range(success_count): p2p_conns[0].send_message(getdata_request) p2p_conns[0].sync_with_ping() assert_equal(p2p_conns[0].block_receive_map[big_old_block], i + 1) assert_equal(len(self.nodes[0].getpeerinfo()), 3) # At most a couple more tries should succeed (depending on how long # the test has been running so far). for i in range(3): p2p_conns[0].send_message(getdata_request) p2p_conns[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) self.log.info( "Peer 0 disconnected after downloading old block too many times") # Requesting the current block on p2p_conns[1] should succeed indefinitely, # even when over the max upload target. # We'll try 200 times getdata_request.inv = [CInv(2, big_new_block)] for i in range(200): p2p_conns[1].send_message(getdata_request) p2p_conns[1].sync_with_ping() assert_equal(p2p_conns[1].block_receive_map[big_new_block], i + 1) self.log.info("Peer 1 able to repeatedly download new block") # But if p2p_conns[1] tries for an old block, it gets disconnected # too. getdata_request.inv = [CInv(2, big_old_block)] p2p_conns[1].send_message(getdata_request) p2p_conns[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 1) self.log.info("Peer 1 disconnected after trying to download old block") self.log.info("Advancing system time on node to clear counters...") # If we advance the time by 24 hours, then the counters should reset, # and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time())) p2p_conns[2].sync_with_ping() p2p_conns[2].send_message(getdata_request) p2p_conns[2].sync_with_ping() assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1) self.log.info("Peer 2 able to download old block") self.nodes[0].disconnect_p2ps() # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 self.log.info("Restarting nodes with -whitelist=127.0.0.1") self.stop_node(0) self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"]) # Reconnect to self.nodes[0] self.nodes[0].add_p2p_connection(TestNode()) network_thread_start() self.nodes[0].p2p.wait_for_verack() # retrieve 20 blocks which should be enough to break the 1MB limit getdata_request.inv = [CInv(2, big_new_block)] for i in range(20): self.nodes[0].p2p.send_message(getdata_request) self.nodes[0].p2p.sync_with_ping() assert_equal( self.nodes[0].p2p.block_receive_map[big_new_block], i + 1) getdata_request.inv = [CInv(2, big_old_block)] self.nodes[0].p2p.send_and_ping(getdata_request) # node is still connected because of the whitelist assert_equal(len(self.nodes[0].getpeerinfo()), 1) self.log.info( "Peer still connected after trying to download old block (whitelisted)") if __name__ == '__main__': MaxUploadTest().main() diff --git a/test/functional/feature_minchainwork.py b/test/functional/feature_minchainwork.py index 96b6393b9..76d46c349 100755 --- a/test/functional/feature_minchainwork.py +++ b/test/functional/feature_minchainwork.py @@ -1,99 +1,99 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test logic for setting nMinimumChainWork on command line. Nodes don't consider themselves out of "initial block download" until their active chain has more work than nMinimumChainWork. Nodes don't download blocks from a peer unless the peer's best known block has more work than nMinimumChainWork. While in initial block download, nodes won't relay blocks to their peers, so test that this parameter functions as intended by verifying that block relay only succeeds past a given node once its nMinimumChainWork has been exceeded. """ import time from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import connect_nodes, assert_equal +from test_framework.util import assert_equal, connect_nodes # 2 hashes required per regtest block (with no difficulty adjustment) REGTEST_WORK_PER_BLOCK = 2 class MinimumChainWorkTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 3 self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]] self.node_min_work = [0, 101, 101] def setup_network(self): # This test relies on the chain setup being: # node0 <- node1 <- node2 # Before leaving IBD, nodes prefer to download blocks from outbound # peers, so ensure that we're mining on an outbound peer and testing # block relay to inbound peers. self.setup_nodes() for i in range(self.num_nodes-1): connect_nodes(self.nodes[i+1], self.nodes[i]) def run_test(self): # Start building a chain on node0. 
node2 shouldn't be able to sync until node1's # minchainwork is exceeded starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work self.log.info( "Testing relay across node {} (minChainWork = {})".format( 1, self.node_min_work[1])) starting_blockcount = self.nodes[2].getblockcount() num_blocks_to_generate = int( (self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK) self.log.info("Generating {} blocks on node0".format( num_blocks_to_generate)) hashes = self.nodes[0].generate(num_blocks_to_generate) self.log.info("Node0 current chain work: {}".format( self.nodes[0].getblockheader(hashes[-1])['chainwork'])) # Sleep a few seconds and verify that node2 didn't get any new blocks # or headers. We sleep, rather than sync_blocks(node0, node1) because # it's reasonable either way for node1 to get the blocks, or not get # them (since they're below node1's minchainwork). time.sleep(3) self.log.info("Verifying node 2 has no more blocks than before") self.log.info("Blockcounts: {}".format( [n.getblockcount() for n in self.nodes])) # Node2 shouldn't have any new headers yet, because node1 should not # have relayed anything. assert_equal(len(self.nodes[2].getchaintips()), 1) assert_equal(self.nodes[2].getchaintips()[0]['height'], 0) assert self.nodes[1].getbestblockhash( ) != self.nodes[0].getbestblockhash() assert_equal(self.nodes[2].getblockcount(), starting_blockcount) self.log.info("Generating one more block") self.nodes[0].generate(1) self.log.info("Verifying nodes are all synced") # Because nodes in regtest are all manual connections (e.g. using # addnode), node1 should not have disconnected node0. If not for that, # we'd expect node1 to have disconnected node0 for serving an # insufficient-work chain, in which case we'd need to reconnect them to # continue the test. self.sync_all() self.log.info("Blockcounts: {}".format( [n.getblockcount() for n in self.nodes])) if __name__ == '__main__': MinimumChainWorkTest().main() diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py index cb71d362d..1526c9b86 100755 --- a/test/functional/feature_notifications.py +++ b/test/functional/feature_notifications.py @@ -1,119 +1,119 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Copyright (c) 2018 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
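# Each notify option runs a shell command with %s substituted when the event
# fires: the new tip's hash for -blocknotify, the txid of a wallet
# transaction for -walletnotify, and the warning text for -alertnotify. The
# commands configured below just append %s to a file, along the lines of
# (path illustrative):
#   -blocknotify=echo %s >> /tmp/blocks.txt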
"""Test the -alertnotify, -blocknotify and -walletnotify options.""" import os from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, wait_until, connect_nodes_bi +from test_framework.util import assert_equal, connect_nodes_bi, wait_until FORK_WARNING_MESSAGE = "Warning: Large-work fork detected, forking after block {}\n" class NotificationsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def setup_network(self): self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt") self.tx_filename = os.path.join( self.options.tmpdir, "transactions.txt") # -alertnotify and -blocknotify on node0, walletnotify on node1 self.extra_args = [["-blockversion=2", "-alertnotify=echo %s >> {}".format( self.alert_filename), "-blocknotify=echo %s >> {}".format(self.block_filename)], ["-blockversion=211", "-rescan", "-walletnotify=echo %s >> {}".format(self.tx_filename)]] super().setup_network() def run_test(self): self.log.info("test -blocknotify") block_count = 10 blocks = self.nodes[1].generate(block_count) # wait at most 10 seconds for expected file size before reading the content wait_until(lambda: os.path.isfile(self.block_filename) and os.stat( self.block_filename).st_size >= (block_count * 65), timeout=10) # file content should equal the generated blocks hashes with open(self.block_filename, 'r') as f: assert_equal(sorted(blocks), sorted(f.read().splitlines())) self.log.info("test -walletnotify") # wait at most 10 seconds for expected file size before reading the content wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat( self.tx_filename).st_size >= (block_count * 65), timeout=10) # file content should equal the generated transaction hashes txids_rpc = list( map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) with open(self.tx_filename, 'r') as f: assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) os.remove(self.tx_filename) self.log.info("test -walletnotify after rescan") # restart node to rescan to force wallet notifications self.restart_node(1) connect_nodes_bi(self.nodes[0], self.nodes[1]) wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat( self.tx_filename).st_size >= (block_count * 65), timeout=10) # file content should equal the generated transaction hashes txids_rpc = list( map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count))) with open(self.tx_filename, 'r') as f: assert_equal(sorted(txids_rpc), sorted(f.read().splitlines())) # Mine another 41 up-version blocks. -alertnotify should trigger on the 51st. self.log.info("test -alertnotify for bip9") self.nodes[1].generate(41) self.sync_all() # Give bitcoind 10 seconds to write the alert notification wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10) with open(self.alert_filename, 'r', encoding='utf8') as f: alert_text = f.read() # Mine more up-version blocks, should not get more alerts: self.nodes[1].generate(2) self.sync_all() with open(self.alert_filename, 'r', encoding='utf8') as f: alert_text2 = f.read() os.remove(self.alert_filename) self.log.info( "-alertnotify should not continue notifying for more unknown version blocks") assert_equal(alert_text, alert_text2) # Create an invalid chain and ensure the node warns. 
self.log.info("test -alertnotify for forked chain") fork_block = self.nodes[0].getbestblockhash() self.nodes[0].generate(1) invalid_block = self.nodes[0].getbestblockhash() self.nodes[0].generate(7) # Invalidate a large branch, which should trigger an alert. self.nodes[0].invalidateblock(invalid_block) # Give bitcoind 10 seconds to write the alert notification wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10) self.log.info(self.alert_filename) with open(self.alert_filename, 'r', encoding='utf8') as f: assert_equal(f.read(), (FORK_WARNING_MESSAGE.format(fork_block))) if __name__ == '__main__': NotificationsTest().main() diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py index 2d49b1973..27e47ac26 100755 --- a/test/functional/feature_nulldummy.py +++ b/test/functional/feature_nulldummy.py @@ -1,119 +1,120 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.messages import FromHex, ToHex -from test_framework.mininode import CTransaction, network_thread_start -from test_framework.blocktools import create_coinbase, create_block -from test_framework.script import CScript import time +from test_framework.blocktools import create_block, create_coinbase +from test_framework.messages import CTransaction, FromHex, ToHex +from test_framework.mininode import network_thread_start +from test_framework.script import CScript +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, assert_raises_rpc_error + NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)" def trueDummy(tx): scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): assert(len(i) == 0) newscript.append(b'\x51') else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) tx.rehash() ''' This test is meant to exercise NULLDUMMY softfork. Connect to a single node. Generate 2 blocks (save the coinbases for later). Generate 427 more blocks. [Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. [Policy] Check that non-NULLDUMMY transactions are rejected before activation. [Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. [Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. 
''' class NULLDUMMYTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.extra_args = [['-whitelist=127.0.0.1']] def run_test(self): self.address = self.nodes[0].getnewaddress() self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address]) network_thread_start() # Block 2 self.coinbase_blocks = self.nodes[0].generate(2) coinbase_txid = [] for i in self.coinbase_blocks: coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) # Block 429 self.nodes[0].generate(427) self.lastblockhash = self.nodes[0].getbestblockhash() self.tip = int("0x" + self.lastblockhash, 0) self.lastblockheight = 429 self.lastblocktime = int(time.time()) + 429 self.log.info( "Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") test1txs = [self.create_transaction( self.nodes[0], coinbase_txid[0], self.ms_address, 49)] txid1 = self.nodes[0].sendrawtransaction(ToHex(test1txs[0]), True) test1txs.append(self.create_transaction( self.nodes[0], txid1, self.ms_address, 48)) txid2 = self.nodes[0].sendrawtransaction(ToHex(test1txs[1]), True) self.block_submit(self.nodes[0], test1txs, True) self.log.info( "Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") test2tx = self.create_transaction( self.nodes[0], txid2, self.ms_address, 48) trueDummy(test2tx) assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, ToHex(test2tx), True) self.log.info( "Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") self.block_submit(self.nodes[0], [test2tx], True) def create_transaction(self, node, txid, to_address, amount): inputs = [{"txid": txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx) return FromHex(CTransaction(), signresult['hex']) def block_submit(self, node, txs, accept=False): block = create_block(self.tip, create_coinbase( self.lastblockheight + 1), self.lastblocktime + 1) block.nVersion = 4 for tx in txs: tx.rehash() block.vtx.append(tx) block.vtx = [block.vtx[0]] + \ sorted(block.vtx[1:], key=lambda tx: tx.get_id()) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(ToHex(block)) if (accept): assert_equal(node.getbestblockhash(), block.hash) self.tip = block.sha256 self.lastblockhash = block.hash self.lastblocktime += 1 self.lastblockheight += 1 else: assert_equal(node.getbestblockhash(), self.lastblockhash) if __name__ == '__main__': NULLDUMMYTest().main() diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index 8e1bbd48d..86b369e30 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -1,225 +1,230 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
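# The framework's Socks5Server is a minimal SOCKS5 listener run on a thread;
# each CONNECT it receives is recorded as a Socks5Command on a queue, which is
# how the assertions below inspect the exact address, port, and credentials
# that bitcoind handed to the proxy.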
''' Test plan: - Start bitcoinds with different proxy configurations - Use addnode to initiate connections - Verify that proxies are connected to, and the right connection command is given - Proxy configurations to test on bitcoind side: - `-proxy` (proxy everything) - `-onion` (proxy just onions) - `-proxyrandomize` Circuit randomization - Proxy configurations to test on proxy side: - support no authentication (other proxy) - support no authentication + user/pass authentication (Tor) - proxy on IPv6 - Create various proxies (as threads) - Create bitcoinds that connect to them - Manipulate the bitcoinds using addnode (onetry) and observe the effects addnode connect to IPv4 addnode connect to IPv6 addnode connect to onion addnode connect to generic DNS name ''' -import socket import os +import socket -from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType +from test_framework.netutil import test_ipv6_local +from test_framework.socks5 import ( + AddressType, + Socks5Command, + Socks5Configuration, + Socks5Server, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( + assert_equal, PORT_MIN, PORT_RANGE, - assert_equal, ) -from test_framework.netutil import test_ipv6_local RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports class ProxyTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 def setup_nodes(self): self.have_ipv6 = test_ipv6_local() # Create two proxies on different ports # ... one unauthenticated self.conf1 = Socks5Configuration() self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000)) self.conf1.unauth = True self.conf1.auth = False # ... one supporting authenticated and unauthenticated (Tor) self.conf2 = Socks5Configuration() self.conf2.addr = ( '127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000)) self.conf2.unauth = True self.conf2.auth = True if self.have_ipv6: # ...
one on IPv6 with similar configuration self.conf3 = Socks5Configuration() self.conf3.af = socket.AF_INET6 self.conf3.addr = ( '::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) self.conf3.unauth = True self.conf3.auth = True else: self.log.warning("Testing without local IPv6 support") self.serv1 = Socks5Server(self.conf1) self.serv1.start() self.serv2 = Socks5Server(self.conf2) self.serv2.start() if self.have_ipv6: self.serv3 = Socks5Server(self.conf3) self.serv3.start() # Note: proxies are not used to connect to local nodes; # this is because the proxy to use is based on CService.GetNetwork(), # which returns NET_UNROUTABLE for localhost args = [ ['-listen', '-proxy={}:{}'.format( self.conf1.addr[0], self.conf1.addr[1]), '-proxyrandomize=1'], ['-listen', '-proxy={}:{}'.format( self.conf1.addr[0], self.conf1.addr[1]), '-onion={}:{}'.format( self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=0'], ['-listen', '-proxy={}:{}'.format( self.conf2.addr[0], self.conf2.addr[1]), '-proxyrandomize=1'], [] ] if self.have_ipv6: args[3] = ['-listen', '-proxy=[{}]:{}'.format( self.conf3.addr[0], self.conf3.addr[1]), '-proxyrandomize=0', '-noonion'] self.add_nodes(self.num_nodes, extra_args=args) self.start_nodes() def node_test(self, node, proxies, auth, test_onion=True): rv = [] # Test: outgoing IPv4 connection through node node.addnode("15.61.23.23:1234", "onetry") cmd = proxies[0].queue.get() assert(isinstance(cmd, Socks5Command)) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, # even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"15.61.23.23") assert_equal(cmd.port, 1234) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if self.have_ipv6: # Test: outgoing IPv6 connection through node node.addnode( "[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry") cmd = proxies[1].queue.get() assert(isinstance(cmd, Socks5Command)) # Note: bitcoind's SOCKS5 implementation only sends atyp # DOMAINNAME, even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534") assert_equal(cmd.port, 5443) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if test_onion: # Test: outgoing onion connection through node node.addnode("bitcoinostk4e4re.onion:8333", "onetry") cmd = proxies[2].queue.get() assert(isinstance(cmd, Socks5Command)) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"bitcoinostk4e4re.onion") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) # Test: outgoing DNS name connection through node node.addnode("node.noumenon:8333", "onetry") cmd = proxies[3].queue.get() assert(isinstance(cmd, Socks5Command)) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"node.noumenon") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) return rv def run_test(self): # basic -proxy self.node_test( self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) # -proxy plus -onion self.node_test( self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) # -proxy plus -onion, -proxyrandomize rv = self.node_test( self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) # Check that credentials used for -proxyrandomize connections are # unique
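        # (With -proxyrandomize=1, bitcoind generates a fresh username/password
        # pair per SOCKS5 connection; Tor maps distinct credentials to
        # distinct circuits, which is the point of the randomization.)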
credentials = set((x.username, x.password) for x in rv) assert_equal(len(credentials), len(rv)) if self.have_ipv6: # proxy on IPv6 localhost self.node_test( self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) def networks_dict(d): r = {} for x in d['networks']: r[x['name']] = x return r # test RPC getnetworkinfo n0 = networks_dict(self.nodes[0].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n0[net]['proxy'], '{}:{}'.format( self.conf1.addr[0], self.conf1.addr[1])) assert_equal(n0[net]['proxy_randomize_credentials'], True) assert_equal(n0['onion']['reachable'], True) n1 = networks_dict(self.nodes[1].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n1[net]['proxy'], '{}:{}'.format( self.conf1.addr[0], self.conf1.addr[1])) assert_equal(n1[net]['proxy_randomize_credentials'], False) assert_equal(n1['onion']['proxy'], '{}:{}'.format( self.conf2.addr[0], self.conf2.addr[1])) assert_equal(n1['onion']['proxy_randomize_credentials'], False) assert_equal(n1['onion']['reachable'], True) n2 = networks_dict(self.nodes[2].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n2[net]['proxy'], '{}:{}'.format( self.conf2.addr[0], self.conf2.addr[1])) assert_equal(n2[net]['proxy_randomize_credentials'], True) assert_equal(n2['onion']['reachable'], True) if self.have_ipv6: n3 = networks_dict(self.nodes[3].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n3[net]['proxy'], '[{}]:{}'.format( self.conf3.addr[0], self.conf3.addr[1])) assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) if __name__ == '__main__': ProxyTest().main() diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index e9554a6db..dbcd273b9 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -1,517 +1,524 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test pruning code # ******** # WARNING: # This test uses 4GB of disk space. # This test takes 30 mins or more (up to 2 hours) # ******** -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import * -from test_framework.blocktools import mine_big_block - import time import os +from test_framework.blocktools import mine_big_block +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_greater_than, + assert_raises_rpc_error, + connect_nodes, + sync_blocks, +) + + MIN_BLOCKS_TO_KEEP = 288 # Rescans start at the earliest block up to 2 hours before a key timestamp, so # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time. TIMESTAMP_WINDOW = 2 * 60 * 60 def calc_usage(blockdir): return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(blockdir + f)) / (1024. * 1024.) class PruneTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 6 # Create nodes 0 and 1 to mine. # Create node 2 to test pruning. 
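        # A pruned node always keeps at least MIN_BLOCKS_TO_KEEP (288) recent
        # blocks on disk, roughly two days of chain at the 10-minute target
        # spacing, so reorgs up to that depth can be served from local data.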
self.full_node_default_args = ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-checkblocks=5", "-noparkdeepreorg", "-maxreorgdepth=-1", "-limitdescendantcount=100", "-limitdescendantsize=5000", "-limitancestorcount=100", "-limitancestorsize=5000"] # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later) # Create node 5 to test wallet in prune mode, but do not connect self.extra_args = [self.full_node_default_args, self.full_node_default_args, ["-maxreceivebuffer=20000", "-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-maxreceivebuffer=20000", "-blockmaxsize=999000", "-noparkdeepreorg", "-maxreorgdepth=-1"], ["-prune=550"]] def setup_network(self): self.setup_nodes() self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/" connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[1], self.nodes[2]) connect_nodes(self.nodes[2], self.nodes[0]) connect_nodes(self.nodes[0], self.nodes[3]) connect_nodes(self.nodes[0], self.nodes[4]) sync_blocks(self.nodes[0:5]) def setup_nodes(self): self.add_nodes(self.num_nodes, self.extra_args, timewait=900) self.start_nodes() def create_big_chain(self): # Start by creating some coinbases we can spend later self.nodes[1].generate(200) sync_blocks(self.nodes[0:2]) self.nodes[0].generate(150) # Then mine enough full blocks to create more than 550MiB of data for i in range(645): mine_big_block(self.nodes[0], self.utxo_cache_0) sync_blocks(self.nodes[0:5]) def test_height_min(self): if not os.path.isfile(self.prunedir + "blk00000.dat"): raise AssertionError("blk00000.dat is missing, pruning too early") self.log.info("Success") self.log.info("Though we're already using more than 550MiB, current usage: {}".format( calc_usage(self.prunedir))) self.log.info( "Mining 25 more blocks should cause the first block file to be pruned") # Pruning doesn't run until we're allocating another chunk; mining 20 full # blocks past the height cutoff will ensure this for i in range(25): mine_big_block(self.nodes[0], self.utxo_cache_0) waitstart = time.time() while os.path.isfile(self.prunedir + "blk00000.dat"): time.sleep(0.1) if time.time() - waitstart > 30: raise AssertionError( "blk00000.dat not pruned when it should be") self.log.info("Success") usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) if (usage > 550): raise AssertionError("Pruning target not being met") def create_chain_with_staleblocks(self): # Create stale blocks in manageable-sized chunks self.log.info( "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") for j in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorgs when node0 reconnects # Stopping node 0 also clears its mempool, so it doesn't have # node1's transactions to accidentally mine self.stop_node(0) self.start_node(0, extra_args=self.full_node_default_args) # Mine 24 blocks in node 1 for i in range(24): if j == 0: mine_big_block(self.nodes[1], self.utxo_cache_1) else: # Add node1's wallet transactions back to the mempool, to # keep the mined blocks from being too small.
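                    # resendwallettransactions rebroadcasts the wallet's
                    # unconfirmed transactions, refilling the mempool so the
                    # generate(1) calls below still produce big blocks.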
self.nodes[1].resendwallettransactions() # tx's already in mempool from previous disconnects self.nodes[1].generate(1) # Reorg back with 25 block chain from node 0 for i in range(25): mine_big_block(self.nodes[0], self.utxo_cache_0) # Create connections in the order so both nodes can see the reorg # at the same time connect_nodes(self.nodes[1], self.nodes[0]) connect_nodes(self.nodes[2], self.nodes[0]) sync_blocks(self.nodes[0:3]) self.log.info("Usage can be over target because of high stale rate: {}".format( calc_usage(self.prunedir))) def reorg_test(self): # Node 1 will mine a 300 block chain starting 287 blocks back from Node # 0 and Node 2's tip. This will cause Node 2 to do a reorg requiring # 288 blocks of undo data to the reorg_test chain. Reboot node 1 to # clear its mempool (hopefully making the invalidate faster). Lower the # block max size so we don't keep mining all our big mempool # transactions (from disconnected blocks) self.stop_node(1) self.start_node(1, extra_args=[ "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode", "-noparkdeepreorg", "-maxreorgdepth=-1"]) height = self.nodes[1].getblockcount() self.log.info("Current block height: {}".format(height)) invalidheight = height - 287 badhash = self.nodes[1].getblockhash(invalidheight) self.log.info("Invalidating block {} at height {}".format( badhash, invalidheight)) self.nodes[1].invalidateblock(badhash) # We've now switched to our previously mined 24-block fork on node 1, but that's not what we want. # So invalidate that fork as well, until we're on the same chain as # node 0/2 (but at an ancestor 288 blocks ago) mainchainhash = self.nodes[0].getblockhash(invalidheight - 1) curhash = self.nodes[1].getblockhash(invalidheight - 1) while curhash != mainchainhash: self.nodes[1].invalidateblock(curhash) curhash = self.nodes[1].getblockhash(invalidheight - 1) assert(self.nodes[1].getblockcount() == invalidheight - 1) self.log.info("New best height: {}".format( self.nodes[1].getblockcount())) # Reboot node1 to clear those giant tx's from mempool self.stop_node(1) self.start_node(1, extra_args=[ "-maxreceivebuffer=20000", "-blockmaxsize=5000", "-checkblocks=5", "-disablesafemode", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Generating new longer chain of 300 more blocks") self.nodes[1].generate(300) self.log.info("Reconnect nodes") connect_nodes(self.nodes[0], self.nodes[1]) connect_nodes(self.nodes[2], self.nodes[1]) sync_blocks(self.nodes[0:3], timeout=120) self.log.info("Verify height on node 2: {}".format( self.nodes[2].getblockcount())) self.log.info("Usage possibly still high because of stale blocks in block files: {}".format( calc_usage(self.prunedir))) self.log.info( "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)") # Get node0's wallet transactions back in its mempool, to keep the # mined blocks from being too small. self.nodes[0].resendwallettransactions() for i in range(22): # This can be slow, so do this in multiple RPC calls to avoid # RPC timeouts.
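            # 22 iterations of generate(10) yield the 220 blocks promised in
            # the log message above.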
# node 0 has many large tx's in its mempool from the disconnects self.nodes[0].generate(10) sync_blocks(self.nodes[0:3], timeout=300) usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: {}".format(usage)) if (usage > 550): raise AssertionError("Pruning target not being met") return invalidheight, badhash def reorg_back(self): # Verify that a block on the old main chain fork has been pruned away assert_raises_rpc_error( -1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash) self.log.info( "Will need to redownload block {}".format(self.forkheight)) # Verify that we have enough history to reorg back to the fork point. # Although this is more than 288 blocks, because this chain was written # more recently and only its other 299 small and 220 large blocks are in # the block files after it, it's expected to still be retained. self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight)) first_reorg_height = self.nodes[2].getblockcount() curchainhash = self.nodes[2].getblockhash(self.mainchainheight) self.nodes[2].invalidateblock(curchainhash) goalbestheight = self.mainchainheight goalbesthash = self.mainchainhash2 # As of 0.10 the current block download logic is not able to reorg to # the original chain created in create_chain_with_staleblocks because # it doesn't know of any peer that's on that chain from which to # redownload its missing blocks. Invalidate the reorg_test chain in # node 0 as well; it can successfully switch to the original chain # because it has all the block data. However it must mine enough blocks # to have a more-work chain than the reorg_test chain in order to # trigger node 2's block download logic. At this point node 2 is within # 288 blocks of the fork point so it will preserve its ability to # reorg. if self.nodes[2].getblockcount() < self.mainchainheight: blocks_to_mine = first_reorg_height + 1 - self.mainchainheight self.log.info( "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {}".format( blocks_to_mine)) self.nodes[0].invalidateblock(curchainhash) assert(self.nodes[0].getblockcount() == self.mainchainheight) assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 self.log.info( "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") waitstart = time.time() while self.nodes[2].getblockcount() < goalbestheight: time.sleep(0.1) if time.time() - waitstart > 900: raise AssertionError("Node 2 didn't reorg to proper height") assert(self.nodes[2].getbestblockhash() == goalbesthash) # Verify we can now have the data for a block previously pruned assert(self.nodes[2].getblock( self.forkhash)["height"] == self.forkheight) def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode self.start_node(node_number) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500) # now re-start in manual pruning mode self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=1"]) node = self.nodes[node_number] assert_equal(node.getblockcount(), 995) def height(index): if use_timestamp: return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW else: return index def prune(index, expected_ret=None): ret = node.pruneblockchain(height(index)) # Check the return value.
When use_timestamp is True, just check # that the return value is less than or equal to the expected # value, because when more than one block is generated per second, # a timestamp will not be granular enough to uniquely identify an # individual block. if expected_ret is None: expected_ret = index if use_timestamp: assert_greater_than(ret, 0) assert_greater_than(expected_ret + 1, ret) else: assert_equal(ret, expected_ret) def has_block(index): return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index)) # should not prune because chain tip of node 3 (995) < PruneAfterHeight # (1000) assert_raises_rpc_error( -1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) node.generate(6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # negative heights should raise an exception assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10) # height=100 too low to prune first block file so this is a no-op prune(100) if not has_block(0): raise AssertionError( "blk00000.dat is missing when should still be there") # Does nothing node.pruneblockchain(height(0)) if not has_block(0): raise AssertionError( "blk00000.dat is missing when should still be there") # height=500 should prune first file prune(500) if has_block(0): raise AssertionError( "blk00000.dat is still there, should be pruned by now") if not has_block(1): raise AssertionError( "blk00001.dat is missing when should still be there") # height=650 should prune second file prune(650) if has_block(1): raise AssertionError( "blk00001.dat is still there, should be pruned by now") # height=1000 should not prune anything more, because tip-288 is in # blk00002.dat. prune(1000, 1001 - MIN_BLOCKS_TO_KEEP) if not has_block(2): raise AssertionError( "blk00002.dat is missing when should still be there") # advance the tip so blk00002.dat and blk00003.dat can be pruned (the # last 288 blocks should now be in blk00004.dat) node.generate(288) prune(1000) if has_block(2): raise AssertionError( "blk00002.dat is still there, should be pruned by now") if has_block(3): raise AssertionError( "blk00003.dat is still there, should be pruned by now") # stop node, start back up with auto-prune at 550MB, make sure still # runs self.stop_node(node_number) self.start_node(node_number, extra_args=["-prune=550"]) self.log.info("Success") def wallet_test(self): # check that the pruning node's wallet is still in good shape self.log.info("Stop and start pruning node to trigger wallet rescan") self.stop_node(2) self.start_node( 2, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. self.log.info("Syncing node 5 to test wallet") connect_nodes(self.nodes[0], self.nodes[5]) nds = [self.nodes[0], self.nodes[5]] sync_blocks(nds, wait=5, timeout=300) self.stop_node(5) # stop and start to trigger rescan self.start_node( 5, extra_args=["-prune=550", "-noparkdeepreorg", "-maxreorgdepth=-1"]) self.log.info("Success") def run_test(self): self.log.info( "Warning! 
This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)") self.log.info("Mining a big blockchain of 995 blocks") # Determine default relay fee self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] # Cache for utxos, as the listunspent may take a long time later in the # test self.utxo_cache_0 = [] self.utxo_cache_1 = [] self.create_big_chain() # Chain diagram key: # * blocks on main chain # +,&,$,@ blocks on other forks # X invalidated block # N1 Node 1 # # Start by mining a simple chain that all nodes have # N0=N1=N2 **...*(995) # stop manual-pruning node with 995 blocks self.stop_node(3) self.stop_node(4) self.log.info( "Check that we haven't started pruning yet because we're below PruneAfterHeight") self.test_height_min() # Extend this chain past the PruneAfterHeight # N0=N1=N2 **...*(1020) self.log.info( "Check that we'll exceed disk space target if we have a very high stale block rate") self.create_chain_with_staleblocks() # Disconnect N0 # And mine a 24 block chain on N1 and a separate 25 block chain on N0 # N1=N2 **...*+...+(1044) # N0 **...**...**(1045) # # reconnect nodes causing reorg on N1 and N2 # N1=N2 **...*(1020) *...**(1045) # \ # +...+(1044) # # repeat this process until you have 12 stale forks hanging off the # main chain on N1 and N2 # N0 *************************...***************************(1320) # # N1=N2 **...*(1020) *...**(1045) *.. ..**(1295) *...**(1320) # \ \ \ # +...+(1044) &.. $...$(1319) # Save some current chain state for later use self.mainchainheight = self.nodes[2].getblockcount() # 1320 self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight) self.log.info("Check that we can survive a 288 block reorg still") (self.forkheight, self.forkhash) = self.reorg_test() # (1033, ) # Now create a 288 block reorg by mining a longer chain on N1 # First disconnect N1 # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain # N1 **...*(1020) **...**(1032)X.. # \ # ++...+(1031)X.. # # Now mine 300 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@(1332) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # Reconnect nodes and mine 220 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # N2 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ *...**(1320) # \ \ # ++...++(1044) .. # # N0 ********************(1032) @@...@@@(1552) # \ # *...**(1320) self.log.info( "Test that we can rerequest a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # original main chain (*), but will require redownload of some blocks # In order to have a peer we think we can download from, must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: # N0 ********************(1032) **...****(1553) # \ # X@...@@@(1552) # # N2 **...*(1020) **...**(1032) **...****(1553) # \ \ # \ X@...@@@(1552) # \ # +.. 
# # N1 doesn't change because 1033 on main chain (*) is invalid self.log.info("Test manual pruning with block indices") self.manual_test(3, use_timestamp=False) self.log.info("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) self.log.info("Test wallet re-scan") self.wallet_test() self.log.info("Done") if __name__ == '__main__': PruneTest().main() diff --git a/test/functional/feature_reindex.py b/test/functional/feature_reindex.py index 3bc4bfc74..fcef4ef21 100755 --- a/test/functional/feature_reindex.py +++ b/test/functional/feature_reindex.py @@ -1,40 +1,41 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test -reindex and -reindex-chainstate with CheckBlockIndex # +import time + from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal -import time class ReindexTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 def reindex(self, justchainstate=False): self.nodes[0].generate(3) blockcount = self.nodes[0].getblockcount() self.stop_nodes() extra_args = [ ["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]] self.start_nodes(extra_args) while self.nodes[0].getblockcount() < blockcount: time.sleep(0.1) assert_equal(self.nodes[0].getblockcount(), blockcount) self.log.info("Success") def run_test(self): self.reindex(False) self.reindex(True) self.reindex(False) self.reindex(True) if __name__ == '__main__': ReindexTest().main()
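# Note that -reindex rebuilds both the block index and the chainstate from
# the blk*.dat files on disk, while -reindex-chainstate rebuilds only the
# chainstate; the polling loop above waits for either variant to replay back
# to the pre-restart height.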