diff --git a/qa/rpc-tests/abandonconflict.py b/qa/rpc-tests/abandonconflict.py index 394e58ade..f2e86276d 100755 --- a/qa/rpc-tests/abandonconflict.py +++ b/qa/rpc-tests/abandonconflict.py @@ -1,188 +1,180 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import urllib.parse class AbandonConflictTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = False - - def setup_network(self): - self.nodes = [] - self.nodes.append( - start_node(0, self.options.tmpdir, - ["-logtimemicros", "-minrelaytxfee=0.00001"])) - self.nodes.append( - start_node(1, self.options.tmpdir, ["-logtimemicros"])) - connect_nodes(self.nodes[0], 1) + self.extra_args = [["-minrelaytxfee=0.00001"], []] def run_test(self): self.nodes[1].generate(100) sync_blocks(self.nodes) balance = self.nodes[0].getbalance() txA = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) txB = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) txC = self.nodes[0].sendtoaddress( self.nodes[0].getnewaddress(), Decimal("10")) sync_mempools(self.nodes) self.nodes[1].generate(1) sync_blocks(self.nodes) newbalance = self.nodes[0].getbalance() # no more than fees lost assert(balance - newbalance < Decimal("0.001")) balance = newbalance url = urllib.parse.urlparse(self.nodes[1].url) self.nodes[0].disconnectnode(url.hostname + ":" + str(p2p_port(1))) # Identify the 10btc outputs nA = next(i for i, vout in enumerate( self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10")) nB = next(i for i, vout in enumerate( self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10")) nC = next(i for i, vout in enumerate( self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10")) inputs = [] # spend 10btc outputs from txA and txB inputs.append({"txid": txA, "vout": nA}) inputs.append({"txid": txB, "vout": nB}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998") outputs[self.nodes[1].getnewaddress()] = Decimal("5") signed = self.nodes[0].signrawtransaction( self.nodes[0].createrawtransaction(inputs, outputs), None, None, "ALL|FORKID") txAB1 = self.nodes[0].sendrawtransaction(signed["hex"]) # Identify the 14.99998btc output nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction( txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998")) # Create a child tx spending AB1 and C inputs = [] inputs.append({"txid": txAB1, "vout": nAB}) inputs.append({"txid": txC, "vout": nC}) outputs = {} outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996") signed2 = self.nodes[0].signrawtransaction( self.nodes[0].createrawtransaction(inputs, outputs), None, None, "ALL|FORKID") txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"]) # In mempool txs from self should increase balance from change newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996")) balance = newbalance # Restart the node with a higher min relay fee so the parent tx is no longer in mempool # TODO: redo with eviction # Note had to make sure tx did not have AllowFree priority stop_node(self.nodes[0], 0) self.nodes[0] = start_node(0, self.options.tmpdir, [ "-logtimemicros", 
"-minrelaytxfee=0.0001"]) # Verify txs no longer in mempool assert_equal(len(self.nodes[0].getrawmempool()), 0) # Not in mempool txs from self should only reduce balance # inputs are still spent, but change not received newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("24.9996")) # Unconfirmed received funds that are not in mempool, also shouldn't show # up in unconfirmed balance unconfbalance = self.nodes[ 0].getunconfirmedbalance() + self.nodes[0].getbalance() assert_equal(unconfbalance, newbalance) # Also shouldn't show up in listunspent assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)]) balance = newbalance # Abandon original transaction and verify inputs are available again # including that the child tx was also abandoned self.nodes[0].abandontransaction(txAB1) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance + Decimal("30")) balance = newbalance # Verify that even with a low min relay fee, the tx is not reaccepted # from wallet on startup once abandoned stop_node(self.nodes[0], 0) self.nodes[0] = start_node(0, self.options.tmpdir, [ "-logtimemicros", "-minrelaytxfee=0.00001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) assert_equal(self.nodes[0].getbalance(), balance) # But if its received again then it is unabandoned # And since now in mempool, the change is available # But its child tx remains abandoned self.nodes[0].sendrawtransaction(signed["hex"]) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998")) balance = newbalance # Send child tx again so its unabandoned self.nodes[0].sendrawtransaction(signed2["hex"]) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996")) balance = newbalance # Remove using high relay fee again stop_node(self.nodes[0], 0) self.nodes[0] = start_node(0, self.options.tmpdir, [ "-logtimemicros", "-minrelaytxfee=0.0001"]) assert_equal(len(self.nodes[0].getrawmempool()), 0) newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance - Decimal("24.9996")) balance = newbalance # Create a double spend of AB1 by spending again from only A's 10 output # Mine double spend from node 1 inputs = [] inputs.append({"txid": txA, "vout": nA}) outputs = {} outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999") tx = self.nodes[0].createrawtransaction(inputs, outputs) signed = self.nodes[0].signrawtransaction(tx, None, None, "ALL|FORKID") self.nodes[1].sendrawtransaction(signed["hex"]) self.nodes[1].generate(1) connect_nodes(self.nodes[0], 1) sync_blocks(self.nodes) # Verify that B and C's 10 BTC outputs are available for spending again # because AB1 is now conflicted newbalance = self.nodes[0].getbalance() assert_equal(newbalance, balance + Decimal("20")) balance = newbalance # There is currently a minor bug around this and so this test doesn't work. See Issue #7315 # Invalidate the block with the double spend and B's 10 BTC output should no longer be available # Don't think C's should either self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) newbalance = self.nodes[0].getbalance() # assert_equal(newbalance, balance - Decimal("10")) self.log.info( "If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer") self.log.info( "conflicted has not resumed causing its inputs to be seen as spent. 
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315 # Invalidate the block with the double spend and B's 10 BTC output should no longer be available # Don't think C's should either self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) newbalance = self.nodes[0].getbalance() # assert_equal(newbalance, balance - Decimal("10")) self.log.info( "If balance has not declined after invalidateblock then the out-of-mempool wallet tx which is no longer") self.log.info( "conflicted has not been resumed, causing its inputs to be seen as spent. See Issue #7315") self.log.info(str(balance) + " -> " + str(newbalance) + " ?") if __name__ == '__main__': AbandonConflictTest().main() diff --git a/qa/rpc-tests/abc-cmdline.py b/qa/rpc-tests/abc-cmdline.py index 94ead7356..9ecfb54d4 100755 --- a/qa/rpc-tests/abc-cmdline.py +++ b/qa/rpc-tests/abc-cmdline.py @@ -1,107 +1,104 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Exercise the command line functions specific to ABC functionality. Currently: -excessiveblocksize= """ import re from test_framework.test_framework import BitcoinTestFramework from test_framework.util import (start_node, stop_node, assert_equal) from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE from test_framework.outputchecker import OutputChecker MAX_GENERATED_BLOCK_SIZE_ERROR = ( 'Max generated block size (blockmaxsize) cannot exceed the excessive block size (excessiveblocksize)') class ABC_CmdLine_Test (BitcoinTestFramework): def __init__(self): super(ABC_CmdLine_Test, self).__init__() self.num_nodes = 1 self.setup_clean_chain = False - def setup_network(self): - self.nodes = self.setup_nodes() - def check_excessive(self, expected_value): 'Check that the excessiveBlockSize is as expected' getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, expected_value) def check_subversion(self, pattern_str): 'Check that the subversion is set as expected' netinfo = self.nodes[0].getnetworkinfo() subversion = netinfo['subversion'] pattern = re.compile(pattern_str) assert(pattern.match(subversion)) def excessiveblocksize_test(self): self.log.info("Testing -excessiveblocksize") self.log.info(" Set to twice the default, i.e. %d bytes" % (2 * LEGACY_MAX_BLOCK_SIZE)) stop_node(self.nodes[0], 0) self.extra_args = [["-excessiveblocksize=%d" % (2 * LEGACY_MAX_BLOCK_SIZE)]] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0]) self.check_excessive(2 * LEGACY_MAX_BLOCK_SIZE) # Check for EB correctness in the subver string - self.check_subversion("/Bitcoin ABC:.*\(EB2\.0\)/") + self.check_subversion("/Bitcoin ABC:.*\(EB2\.0; .*\)/") self.log.info(" Attempt to set below legacy limit of 1MB - try %d bytes" % LEGACY_MAX_BLOCK_SIZE) outputchecker = OutputChecker() stop_node(self.nodes[0], 0) try: self.extra_args = [ ["-excessiveblocksize=%d" % LEGACY_MAX_BLOCK_SIZE]] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0], stderr_checker=outputchecker) except Exception as e: assert(outputchecker.contains( 'Error: Excessive block size must be > 1,000,000 bytes (1MB)')) assert_equal( 'bitcoind exited with status 1 during initialization', str(e)) else: raise AssertionError("Must not accept excessiveblocksize" " value < %d bytes" % LEGACY_MAX_BLOCK_SIZE) self.log.info(" Attempt to set below blockmaxsize (mining limit)") outputchecker = OutputChecker() try: self.extra_args = [['-blockmaxsize=1500000', '-excessiveblocksize=1300000']] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0], stderr_checker=outputchecker) except Exception as e: assert(outputchecker.contains( 'Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) assert_equal( 'bitcoind exited with status 1 during initialization', str(e)) else: raise AssertionError('Must not accept excessiveblocksize' ' below blockmaxsize') # Make sure we leave the test with a node running as this is what the # framework expects.
self.nodes[0] = start_node(0, self.options.tmpdir, []) def run_test(self): # Run tests on -excessiveblocksize option self.excessiveblocksize_test() if __name__ == '__main__': ABC_CmdLine_Test().main() diff --git a/qa/rpc-tests/abc-p2p-fullblocktest.py b/qa/rpc-tests/abc-p2p-fullblocktest.py index e91e2144d..66c4955d1 100755 --- a/qa/rpc-tests/abc-p2p-fullblocktest.py +++ b/qa/rpc-tests/abc-p2p-fullblocktest.py @@ -1,514 +1,509 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This test checks simple acceptance of bigger blocks via p2p. It is derived from the much more complex p2p-fullblocktest. The intention is that small tests can be derived from this one, or this one can be extended, to cover the checks done for bigger blocks (e.g. sigops limits). """ from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.comptool import TestManager, TestInstance, RejectResult from test_framework.blocktools import * import time from test_framework.key import CECKey from test_framework.script import * from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS_PER_MB, MAX_TX_SIGOPS_COUNT) class PreviousSpendableOutput(object): def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending # TestNode: A peer we use to send messages to bitcoind, and store responses. class TestNode(SingleNodeConnCB): def __init__(self): self.last_sendcmpct = None self.last_cmpctblock = None self.last_getheaders = None self.last_headers = None SingleNodeConnCB.__init__(self) def on_sendcmpct(self, conn, message): self.last_sendcmpct = message def on_cmpctblock(self, conn, message): self.last_cmpctblock = message self.last_cmpctblock.header_and_shortids.header.calc_sha256() def on_getheaders(self, conn, message): self.last_getheaders = message def on_headers(self, conn, message): self.last_headers = message for x in self.last_headers.headers: x.calc_sha256() def clear_block_data(self): with mininode_lock: self.last_sendcmpct = None self.last_cmpctblock = None class FullBlockTest(ComparisonTestFramework): # Can either run this test as 1 node with expected answers, or two and compare them. # Change the "outcome" variable from each TestInstance object to only do # the comparison. 
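    # Editor's aside (illustrative, not part of the diff): every signature
    # this test produces appends SIGHASH_ALL | SIGHASH_FORKID, that is
    # 0x01 | 0x40 == 0x41, the replay-protected sighash type selected by
    # the "ALL|FORKID" string passed to signrawtransaction in the other
    # tests of this series.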
def __init__(self): super().__init__() - self.excessive_block_size = 16 * ONE_MEGABYTE self.num_nodes = 1 self.block_heights = {} self.coinbase_key = CECKey() self.coinbase_key.set_secretbytes(b"fatstacks") self.coinbase_pubkey = self.coinbase_key.get_pubkey() self.tip = None self.blocks = {} - - def setup_network(self): + self.excessive_block_size = 16 * ONE_MEGABYTE self.extra_args = [['-norelaypriority', '-whitelist=127.0.0.1', '-limitancestorcount=9999', '-limitancestorsize=9999', '-limitdescendantcount=9999', '-limitdescendantsize=9999', '-maxmempool=999', "-excessiveblocksize=%d" % self.excessive_block_size]] - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - self.extra_args, - binary=[self.options.testbinary]) def add_options(self, parser): super().add_options(parser) parser.add_option( "--runbarelyexpensive", dest="runbarelyexpensive", default=True) def run_test(self): self.test = TestManager(self, self.options.tmpdir) self.test.add_all_connections(self.nodes) # Start up network handling in another thread NetworkThread().start() # Set the blocksize to 2MB as initial condition self.nodes[0].setexcessiveblock(self.excessive_block_size) self.test.run() def add_transactions_to_block(self, block, tx_list): [tx.rehash() for tx in tx_list] block.vtx.extend(tx_list) # this is a little handier to use than the version in blocktools.py def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): tx = create_transaction(spend_tx, n, b"", value, script) return tx # sign a transaction, using the key we know about # this signs input 0 in tx, which is assumed to be spending output n in # spend_tx def sign_tx(self, tx, spend_tx, n): scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend tx.vin[0].scriptSig = CScript() return sighash = SignatureHashForkId( spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue) tx.vin[0].scriptSig = CScript( [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): tx = self.create_tx(spend_tx, n, value, script) self.sign_tx(tx, spend_tx, n) tx.rehash() return tx def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True): """ Create a block on top of self.tip, and advance self.tip to point to the new block if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output, and rest will go to fees. 
""" if self.tip == None: base_block_hash = self.genesis_hash block_time = int(time.time()) + 1 else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) coinbase.vout[0].nValue += additional_coinbase_value if (spend != None): coinbase.vout[0].nValue += spend.tx.vout[ spend.n].nValue - 1 # all but one satoshi to fees coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) spendable_output = None if (spend != None): tx = CTransaction() # no signature yet tx.vin.append( CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # We put some random data into the first transaction of the chain # to randomize ids tx.vout.append( CTxOut(0, CScript([random.randint(0, 255), OP_DROP, OP_TRUE]))) if script == None: tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) else: tx.vout.append(CTxOut(1, script)) spendable_output = PreviousSpendableOutput(tx, 0) # Now sign it if necessary scriptSig = b"" scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend scriptSig = CScript([OP_TRUE]) else: # We have to actually sign it sighash = SignatureHashForkId( spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend.tx.vout[spend.n].nValue) scriptSig = CScript( [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) tx.vin[0].scriptSig = scriptSig # Now add the transaction to the block self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() if spendable_output != None and block_size > 0: while len(block.serialize()) < block_size: tx = CTransaction() script_length = block_size - len(block.serialize()) - 79 if script_length > 510000: script_length = 500000 tx_sigops = min( extra_sigops, script_length, MAX_TX_SIGOPS_COUNT) extra_sigops -= tx_sigops script_pad_len = script_length - tx_sigops script_output = CScript( [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx.vout.append(CTxOut(0, script_output)) tx.vin.append( CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n))) spendable_output = PreviousSpendableOutput(tx, 0) self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() # Make sure the math above worked out to produce the correct block size # (the math will fail if there are too many transactions in the block) assert_equal(len(block.serialize()), block_size) # Make sure all the requested sigops have been included assert_equal(extra_sigops, 0) if solve: block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block def get_tests(self): self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 spendable_outputs = [] # save the current tip so it can be spent by a later block def save_spendable_output(): spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(): return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) # returns a test case that asserts that the current tip was accepted def accepted(): return TestInstance([[self.tip, True]]) # returns a test case that asserts that the current tip was rejected def rejected(reject=None): if reject is None: return TestInstance([[self.tip, False]]) else: 
return TestInstance([[self.tip, reject]]) # move the tip back to a previous block def tip(number): self.tip = self.blocks[number] # adds transactions to the block and updates state def update_block(block_number, new_transactions): block = self.blocks[block_number] self.add_transactions_to_block(block, new_transactions) old_sha256 = block.sha256 block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: self.block_heights[ block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block # shorthand for functions block = self.next_block # Create a new block block(0) save_spendable_output() yield accepted() # Now we need that block to mature so we can spend the coinbase. test = TestInstance(sync_every_block=False) for i in range(99): block(5000 + i) test.blocks_and_transactions.append([self.tip, True]) save_spendable_output() yield test # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(100): out.append(get_spendable_output()) # Let's build some blocks and test them. for i in range(16): n = i + 1 block(n, spend=out[i], block_size=n * ONE_MEGABYTE) yield accepted() # block of maximal size block(17, spend=out[16], block_size=self.excessive_block_size) yield accepted() # Reject oversized blocks with bad-blk-length error block(18, spend=out[17], block_size=self.excessive_block_size + 1) yield rejected(RejectResult(16, b'bad-blk-length')) # Rewind bad block. tip(17) # Accept many sigops lots_of_checksigs = CScript( [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) block( 19, spend=out[17], script=lots_of_checksigs, block_size=ONE_MEGABYTE) yield accepted() too_many_blk_checksigs = CScript( [OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB) block( 20, spend=out[18], script=too_many_blk_checksigs, block_size=ONE_MEGABYTE) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(19) # Accept 40k sigops per block > 1MB and <= 2MB block(21, spend=out[18], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=ONE_MEGABYTE + 1) yield accepted() # Accept 40k sigops per block > 1MB and <= 2MB block(22, spend=out[19], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE) yield accepted() # Reject more than 40k sigops per block > 1MB and <= 2MB. block(23, spend=out[20], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=ONE_MEGABYTE + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(22) # Reject more than 40k sigops per block > 1MB and <= 2MB. block(24, spend=out[20], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(22) # Accept 60k sigops per block > 2MB and <= 3MB block(25, spend=out[20], script=lots_of_checksigs, extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE + 1) yield accepted() # Accept 60k sigops per block > 2MB and <= 3MB block(26, spend=out[21], script=lots_of_checksigs, extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB, block_size=3 * ONE_MEGABYTE) yield accepted()
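        # Editor's sketch (illustrative, not part of the diff): blocks 19
        # through 28 probe a sigops budget that is assumed to scale with
        # each started megabyte of serialized block size; the P2PK coinbase
        # contributes one sigop of its own, which is why lots_of_checksigs
        # stops at MAX_BLOCK_SIGOPS_PER_MB - 1.
        def assumed_sigops_budget(block_size_bytes):
            started_mb = (block_size_bytes + ONE_MEGABYTE - 1) // ONE_MEGABYTE
            return started_mb * MAX_BLOCK_SIGOPS_PER_MB

        assert assumed_sigops_budget(ONE_MEGABYTE) == MAX_BLOCK_SIGOPS_PER_MB
        assert assumed_sigops_budget(
            2 * ONE_MEGABYTE + 1) == 3 * MAX_BLOCK_SIGOPS_PER_MB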
# Reject more than 60k sigops per block > 2MB and <= 3MB. block(27, spend=out[22], script=lots_of_checksigs, extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(26) # Reject more than 60k sigops per block > 2MB and <= 3MB. block(28, spend=out[22], script=lots_of_checksigs, extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=3 * ONE_MEGABYTE) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(26) # Too many sigops in one txn too_many_tx_checksigs = CScript( [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1)) block( 29, spend=out[22], script=too_many_tx_checksigs, block_size=ONE_MEGABYTE + 1) yield rejected(RejectResult(16, b'bad-txn-sigops')) # Rewind bad block tip(26) # P2SH # Build the redeem script, hash it, use hash to create the p2sh script redeem_script = CScript([self.coinbase_pubkey] + [ OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG]) redeem_script_hash = hash160(redeem_script) p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) # Create a p2sh transaction p2sh_tx = self.create_and_sign_transaction( out[22].tx, out[22].n, 1, p2sh_script) # Add the transaction to the block block(30) update_block(30, [p2sh_tx]) yield accepted() # Creates a new transaction using the p2sh transaction included in the # last block def spend_p2sh_tx(output_script=CScript([OP_TRUE])): # Create the transaction spent_p2sh_tx = CTransaction() spent_p2sh_tx.vin.append(CTxIn(COutPoint(p2sh_tx.sha256, 0), b'')) spent_p2sh_tx.vout.append(CTxOut(1, output_script)) # Sign the transaction using the redeem script sighash = SignatureHashForkId( redeem_script, spent_p2sh_tx, 0, SIGHASH_ALL | SIGHASH_FORKID, p2sh_tx.vout[0].nValue) sig = self.coinbase_key.sign(sighash) + bytes( bytearray([SIGHASH_ALL | SIGHASH_FORKID])) spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script]) spent_p2sh_tx.rehash() return spent_p2sh_tx # Sigops p2sh limit p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - \ redeem_script.GetSigOpCount(True) # Too many sigops in one p2sh txn too_many_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit + 1)) block(31, spend=out[23], block_size=ONE_MEGABYTE + 1) update_block(31, [spend_p2sh_tx(too_many_p2sh_sigops)]) yield rejected(RejectResult(16, b'bad-txn-sigops')) # Rewind bad block tip(30) # Max sigops in one p2sh txn max_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit)) block(32, spend=out[23], block_size=ONE_MEGABYTE + 1) update_block(32, [spend_p2sh_tx(max_p2sh_sigops)]) yield accepted() # Check that compact blocks also work for big blocks node = self.nodes[0] peer = TestNode() peer.add_connection(NodeConn('127.0.0.1', p2p_port(0), node, peer)) # Start up network handling in another thread and wait for connection # to be established NetworkThread().start() peer.wait_for_verack() # Wait for SENDCMPCT def received_sendcmpct(): return (peer.last_sendcmpct != None) got_sendcmpct = wait_until(received_sendcmpct, timeout=30) assert(got_sendcmpct) sendcmpct = msg_sendcmpct() sendcmpct.version = 1 sendcmpct.announce = True peer.send_and_ping(sendcmpct) # Exchange headers def received_getheaders(): return (peer.last_getheaders != None) got_getheaders = wait_until(received_getheaders, timeout=30) assert(got_getheaders) # Return the favor peer.send_message(peer.last_getheaders) # Wait for the header list def received_headers(): return (peer.last_headers != None) got_headers = wait_until(received_headers, timeout=30) assert(got_headers)
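        # Editor's note (illustrative, not part of the diff): echoing the
        # node's own getheaders/headers traffic back at it convinces the
        # node that this peer already knows the tip, which is assumed to be
        # the precondition for announcing new blocks via cmpctblock in
        # high-bandwidth mode instead of inv/headers.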
# It's like we know about the same headers! peer.send_message(peer.last_headers) # Send a block b33 = block(33, spend=out[24], block_size=ONE_MEGABYTE + 1) yield accepted() # Check that the node forwards it via compact block def received_block(): return (peer.last_cmpctblock != None) got_cmpctblock = wait_until(received_block, timeout=30) assert(got_cmpctblock) # Was it our block? cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header cmpctblk_header.calc_sha256() assert(cmpctblk_header.sha256 == b33.sha256) # Send a bigger block peer.clear_block_data() b34 = block(34, spend=out[25], block_size=8 * ONE_MEGABYTE) yield accepted() # Check that the node forwards it via compact block got_cmpctblock = wait_until(received_block, timeout=30) assert(got_cmpctblock) # Was it our block? cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header cmpctblk_header.calc_sha256() assert(cmpctblk_header.sha256 == b34.sha256) # Let's send a compact block and see if the node accepts it. # First, we generate the block and send all transactions to the mempool b35 = block(35, spend=out[26], block_size=8 * ONE_MEGABYTE) for i in range(1, len(b35.vtx)): node.sendrawtransaction(ToHex(b35.vtx[i]), True) # Now we create the compact block and send it comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(b35) peer.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) # Check that the compact block was accepted properly assert(int(node.getbestblockhash(), 16) == b35.sha256) if __name__ == '__main__': FullBlockTest().main() diff --git a/qa/rpc-tests/abc-rpc.py b/qa/rpc-tests/abc-rpc.py index 8d0a6c29c..c62df85e0 100755 --- a/qa/rpc-tests/abc-rpc.py +++ b/qa/rpc-tests/abc-rpc.py @@ -1,105 +1,101 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Exercise the Bitcoin ABC RPC calls.
import time import random import re from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import NODE_BITCOIN_CASH from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE) class ABC_RPC_Test (BitcoinTestFramework): def __init__(self): super(ABC_RPC_Test, self).__init__() self.num_nodes = 1 self.tip = None self.setup_clean_chain = True - - def setup_network(self): self.extra_args = [['-norelaypriority', '-whitelist=127.0.0.1', '-par=1']] - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - self.extra_args) - self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) def check_subversion(self, pattern_str): - 'Check that the subversion is set as expected' + # Check that the subversion is set as expected netinfo = self.nodes[0].getnetworkinfo() subversion = netinfo['subversion'] pattern = re.compile(pattern_str) assert(pattern.match(subversion)) def test_excessiveblock(self): # Check that we start with DEFAULT_MAX_BLOCK_SIZE getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, DEFAULT_MAX_BLOCK_SIZE) # Check that setting to legacy size is ok self.nodes[0].setexcessiveblock(LEGACY_MAX_BLOCK_SIZE + 1) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, LEGACY_MAX_BLOCK_SIZE + 1) # Check that going below legacy size is not accepted try: self.nodes[0].setexcessiveblock(LEGACY_MAX_BLOCK_SIZE) except JSONRPCException as e: assert("Invalid parameter, excessiveblock must be larger than %d" % LEGACY_MAX_BLOCK_SIZE in e.error['message']) else: raise AssertionError( "Must not accept excessiveblock values <= %d bytes" % LEGACY_MAX_BLOCK_SIZE) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, LEGACY_MAX_BLOCK_SIZE + 1) # Check setting to 2MB self.nodes[0].setexcessiveblock(2 * ONE_MEGABYTE) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, 2 * ONE_MEGABYTE) # Check for EB correctness in the subver string - self.check_subversion("/Bitcoin ABC:.*\(EB2\.0\)/") + self.check_subversion("/Bitcoin ABC:.*\(EB2\.0; .*\)/") # Check setting to 13MB self.nodes[0].setexcessiveblock(13 * ONE_MEGABYTE) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, 13 * ONE_MEGABYTE) # Check for EB correctness in the subver string - self.check_subversion("/Bitcoin ABC:.*\(EB13\.0\)/") + self.check_subversion("/Bitcoin ABC:.*\(EB13\.0; .*\)/") # Check setting to 13.14MB self.nodes[0].setexcessiveblock(13140000) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, 13.14 * ONE_MEGABYTE) # Check for EB correctness in the subver string - self.check_subversion("/Bitcoin ABC:.*\(EB13\.1\)/") + self.check_subversion("/Bitcoin ABC:.*\(EB13\.1; .*\)/")
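        # Editor's sketch (illustrative, not part of the diff): the EB
        # token checked above is assumed to be the excessive block size
        # rendered as megabytes with a single decimal digit, truncating
        # the rest, e.g. 13140000 bytes -> "EB13.1".
        def assumed_eb_token(size_in_bytes):
            tenths_of_mb = size_in_bytes // (ONE_MEGABYTE // 10)
            return "EB%d.%d" % (tenths_of_mb // 10, tenths_of_mb % 10)

        assert assumed_eb_token(2 * ONE_MEGABYTE) == "EB2.0"
        assert assumed_eb_token(13140000) == "EB13.1"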
def test_cashservicebit(self): # Check that NODE_BITCOIN_CASH bit is set. # This can be seen in the 'localservices' entry of getnetworkinfo RPC. node = self.nodes[0] nw_info = node.getnetworkinfo() assert_equal(int(nw_info['localservices'], 16) & NODE_BITCOIN_CASH, NODE_BITCOIN_CASH) def run_test(self): + self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) self.test_excessiveblock() self.test_cashservicebit() if __name__ == '__main__': ABC_RPC_Test().main() diff --git a/qa/rpc-tests/bip65-cltv-p2p.py b/qa/rpc-tests/bip65-cltv-p2p.py index e500de42f..de1375479 100755 --- a/qa/rpc-tests/bip65-cltv-p2p.py +++ b/qa/rpc-tests/bip65-cltv-p2p.py @@ -1,193 +1,187 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.mininode import CTransaction, NetworkThread from test_framework.blocktools import create_coinbase, create_block from test_framework.comptool import TestInstance, TestManager from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP from io import BytesIO import time def cltv_invalidate(tx): '''Modify the signature in vin 0 of the tx to fail CLTV Prepends -1 CLTV DROP to the scriptSig itself. ''' tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig))) ''' This test is meant to exercise BIP65 (CHECKLOCKTIMEVERIFY) Connect to a single node. Mine 2 (version 3) blocks (save the coinbases for later). Generate 398 more version 3 blocks, verify the node accepts. Mine 749 version 4 blocks, verify the node accepts. Check that the new CLTV rules are not enforced on the 750th version 4 block. Check that the new CLTV rules are enforced on the 951st version 4 block. Mine 199 new version blocks. Mine 1 old-version block. Mine 1 new version block. Mine 1 old version block, see that the node rejects. '''
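# Editor's note (illustrative, not part of the diff): cltv_invalidate works
# because OP_CHECKLOCKTIMEVERIFY fails the script outright when the top
# stack item is negative, before any comparison with nLockTime, so the
# prepended OP_1NEGATE poisons every spend once BIP65 is enforced.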
class BIP65Test(ComparisonTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 - - def setup_network(self): - # Must set the blockversion for this test - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-whitelist=127.0.0.1', - '-blockversion=3']], - binary=[self.options.testbinary]) + self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=3']] def run_test(self): test = TestManager(self, self.options.tmpdir) test.add_all_connections(self.nodes) # Start up network handling in another thread NetworkThread().start() test.run() def create_transaction(self, node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{"txid": from_txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID") tx = CTransaction() f = BytesIO(hex_str_to_bytes(signresult['hex'])) tx.deserialize(f) return tx def get_tests(self): self.coinbase_blocks = self.nodes[0].generate(2) height = 3 # height of the next block to build self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.nodeaddress = self.nodes[0].getnewaddress() self.last_block_time = int(time.time()) ''' 398 more version 3 blocks ''' test_blocks = [] for i in range(398): block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 3 block.rehash() block.solve() test_blocks.append([block, True]) self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance(test_blocks, sync_every_block=False) ''' Mine 749 version 4 blocks ''' test_blocks = [] for i in range(749): block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 4 block.rehash() block.solve() test_blocks.append([block, True]) self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance(test_blocks, sync_every_block=False) ''' Check that the new CLTV rules are not enforced in the 750th version 4 block. ''' spendtx = self.create_transaction(self.nodes[0], self.coinbase_blocks[0], self.nodeaddress, 1.0) cltv_invalidate(spendtx) spendtx.rehash() block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 4 block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance([[block, True]]) ''' Mine 199 new version blocks on last valid tip ''' test_blocks = [] for i in range(199): block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 4 block.rehash() block.solve() test_blocks.append([block, True]) self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance(test_blocks, sync_every_block=False) ''' Mine 1 old version block ''' block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 3 block.rehash() block.solve() self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance([[block, True]]) ''' Mine 1 new version block ''' block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 4 block.rehash() block.solve() self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance([[block, True]])
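        # Editor's sketch (illustrative, not part of the diff): under the
        # IsSuperMajority deployment assumed here, 750 of the last 1000
        # blocks at version 4 start enforcing CLTV inside v4 blocks, and
        # 950 make version 3 blocks invalid; tallying the v4 blocks mined
        # in the steps above shows the chain now sits exactly at that
        # rejection threshold.
        v4_mined = 749 + 1 + 199 + 1   # the v4 steps yielded so far
        assert v4_mined == 950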
''' Check that the new CLTV rules are enforced in the 951st version 4 block. ''' spendtx = self.create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) cltv_invalidate(spendtx) spendtx.rehash() block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 4 block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.last_block_time += 1 yield TestInstance([[block, False]]) ''' Mine 1 old version block, should be invalid ''' block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 3 block.rehash() block.solve() self.last_block_time += 1 yield TestInstance([[block, False]]) if __name__ == '__main__': BIP65Test().main() diff --git a/qa/rpc-tests/bip65-cltv.py b/qa/rpc-tests/bip65-cltv.py index 8e8b6bb9c..c862f2fb0 100755 --- a/qa/rpc-tests/bip65-cltv.py +++ b/qa/rpc-tests/bip65-cltv.py @@ -1,99 +1,94 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test the CHECKLOCKTIMEVERIFY (BIP65) soft-fork logic # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class BIP65Test(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 3 self.setup_clean_chain = False + self.extra_args = [[], ["-blockversion=3"], ["-blockversion=4"]] def setup_network(self): - self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, [])) - self.nodes.append( - start_node(1, self.options.tmpdir, ["-blockversion=3"])) - self.nodes.append( - start_node(2, self.options.tmpdir, ["-blockversion=4"])) + self.setup_nodes() connect_nodes(self.nodes[1], 0) connect_nodes(self.nodes[2], 0) - self.is_network_split = False self.sync_all() def run_test(self): cnt = self.nodes[0].getblockcount() # Mine some old-version blocks self.nodes[1].generate(200) cnt += 100 self.sync_all() if (self.nodes[0].getblockcount() != cnt + 100): raise AssertionError("Failed to mine 100 version=3 blocks") # Mine 750 new-version blocks for i in range(15): self.nodes[2].generate(50) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 850): raise AssertionError("Failed to mine 750 version=4 blocks") # TODO: check that new CHECKLOCKTIMEVERIFY rules are not enforced # Mine 1 new-version block self.nodes[2].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 851): raise AssertionError("Failed to mine a version=4 block") # TODO: check that new CHECKLOCKTIMEVERIFY rules are enforced # Mine 198 new-version blocks for i in range(2): self.nodes[2].generate(99) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1049): raise AssertionError("Failed to mine 198 version=4 blocks") # Mine 1 old-version block self.nodes[1].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1050): raise AssertionError( "Failed to mine a version=3 block after 949 version=4 blocks") # Mine 1 new-version block self.nodes[2].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1051): raise AssertionError("Failed to mine a version=4 block") # Mine 1 old-version block try: self.nodes[1].generate(1) raise AssertionError( "Succeeded in mining a version=3 block after 950 version=4 blocks") except JSONRPCException: pass self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1051): raise AssertionError( "Accepted a version=3 block after 950 version=4 blocks") # Mine 1 new-version block
self.nodes[2].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1052): raise AssertionError("Failed to mine a version=4 block") if __name__ == '__main__': BIP65Test().main() diff --git a/qa/rpc-tests/bip68-112-113-p2p.py b/qa/rpc-tests/bip68-112-113-p2p.py index 58d6351be..ee55da7f9 100755 --- a/qa/rpc-tests/bip68-112-113-p2p.py +++ b/qa/rpc-tests/bip68-112-113-p2p.py @@ -1,620 +1,614 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.mininode import ToHex, CTransaction, NetworkThread from test_framework.blocktools import create_coinbase, create_block from test_framework.comptool import TestInstance, TestManager from test_framework.script import * from io import BytesIO import time ''' This test is meant to exercise activation of the first version bits soft fork This soft fork will activate the following BIPS: BIP 68 - nSequence relative lock times BIP 112 - CHECKSEQUENCEVERIFY BIP 113 - MedianTimePast semantics for nLockTime regtest lock-in with 108/144 block signalling activation after a further 144 blocks mine 82 blocks whose coinbases will be used to generate inputs for our tests mine 61 blocks to transition from DEFINED to STARTED mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN mine 140 blocks and seed block chain with the 82 inputs will use for our tests at height 572 mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered mine 1 block and test that enforcement has triggered (which triggers ACTIVE) Test BIP 113 is enforced Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height Mine 1 block so next height is 581 and test BIP 68 now passes time but not height Mine 1 block so next height is 582 and test BIP 68 now passes time and height Test that BIP 112 is enforced Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates And that after the soft fork activates transactions pass and fail as they should according to the rules. For each BIP, transactions of versions 1 and 2 will be tested. 
---------------- BIP 113: bip113tx - modify the nLocktime variable BIP 68: bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below BIP 112: bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP bip112tx_special - test negative argument to OP_CSV ''' base_relative_locktime = 10 seq_disable_flag = 1 << 31 seq_random_high_bit = 1 << 25 seq_type_flag = 1 << 22 seq_random_low_bit = 1 << 18 # b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field # relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with # the indicated bits set if their indices are 1 relative_locktimes = [] for b31 in range(2): b25times = [] for b25 in range(2): b22times = [] for b22 in range(2): b18times = [] for b18 in range(2): rlt = base_relative_locktime if (b31): rlt = rlt | seq_disable_flag if (b25): rlt = rlt | seq_random_high_bit if (b22): rlt = rlt | seq_type_flag if (b18): rlt = rlt | seq_random_low_bit b18times.append(rlt) b22times.append(b18times) b25times.append(b22times) relative_locktimes.append(b25times) def all_rlt_txs(txarray): txs = [] for b31 in range(2): for b25 in range(2): for b22 in range(2): for b18 in range(2): txs.append(txarray[b31][b25][b22][b18]) return txs class BIP68_112_113Test(ComparisonTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 - - def setup_network(self): - # Must set the blockversion for this test - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-whitelist=127.0.0.1', - '-blockversion=4']], - binary=[self.options.testbinary]) + self.extra_args = [['-whitelist=127.0.0.1', '-blockversion=4']] def run_test(self): test = TestManager(self, self.options.tmpdir) test.add_all_connections(self.nodes) NetworkThread().start() # Start up network handling in another thread test.run() def send_generic_input_tx(self, node, coinbases): amount = Decimal("49.99") return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount)))) def create_transaction(self, node, txid, to_address, amount): inputs = [{"txid": txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) tx = CTransaction() f = BytesIO(hex_str_to_bytes(rawtx)) tx.deserialize(f) return tx def sign_transaction(self, node, unsignedtx): rawtx = ToHex(unsignedtx) signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID") tx = CTransaction() f = BytesIO(hex_str_to_bytes(signresult['hex'])) tx.deserialize(f) return tx def generate_blocks(self, number, version, test_blocks=[]): for i in range(number): block = self.create_test_block([], version) test_blocks.append([block, True]) self.last_block_time += 600 self.tip = block.sha256 self.tipheight += 1 return test_blocks def create_test_block(self, txs, version=536870912): block = create_block(self.tip, create_coinbase( self.tipheight + 1), self.last_block_time + 600) block.nVersion = version block.vtx.extend(txs) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() 
block.solve() return block def create_bip68txs(self, bip68inputs, txversion, locktime_delta=0): txs = [] assert(len(bip68inputs) >= 16) i = 0 for b31 in range(2): b25txs = [] for b25 in range(2): b22txs = [] for b22 in range(2): b18txs = [] for b18 in range(2): tx = self.create_transaction( self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98")) i += 1 tx.nVersion = txversion tx.vin[0].nSequence = relative_locktimes[ b31][b25][b22][b18] + locktime_delta b18txs.append(self.sign_transaction(self.nodes[0], tx)) b22txs.append(b18txs) b25txs.append(b22txs) txs.append(b25txs) return txs def create_bip112special(self, input, txversion): tx = self.create_transaction( self.nodes[0], input, self.nodeaddress, Decimal("49.98")) tx.nVersion = txversion signtx = self.sign_transaction(self.nodes[0], tx) signtx.vin[0].scriptSig = CScript( [-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) return signtx def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta=0): txs = [] assert(len(bip112inputs) >= 16) i = 0 for b31 in range(2): b25txs = [] for b25 in range(2): b22txs = [] for b22 in range(2): b18txs = [] for b18 in range(2): tx = self.create_transaction( self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98")) i += 1 if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed tx.vin[ 0].nSequence = base_relative_locktime + locktime_delta else: # vary nSequence instead, OP_CSV is fixed tx.vin[0].nSequence = relative_locktimes[ b31][b25][b22][b18] + locktime_delta tx.nVersion = txversion signtx = self.sign_transaction(self.nodes[0], tx) if (varyOP_CSV): signtx.vin[0].scriptSig = CScript( [relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) else: signtx.vin[0].scriptSig = CScript( [base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig))) b18txs.append(signtx) b22txs.append(b18txs) b25txs.append(b22txs) txs.append(b25txs) return txs def get_tests(self): long_past_time = int(time.time()) - 600 * \ 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will # still all be before long_past_time self.coinbase_blocks = self.nodes[0].generate( 1 + 16 + 2 * 32 + 1) # 82 blocks generated for inputs self.nodes[0].setmocktime( 0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time self.tipheight = 82 # height of the next block to build self.last_block_time = long_past_time self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.nodeaddress = self.nodes[0].getnewaddress() assert_equal( get_bip9_status(self.nodes[0], 'csv')['status'], 'defined') test_blocks = self.generate_blocks(61, 4) yield TestInstance(test_blocks, sync_every_block=False) # 1 # Advanced from DEFINED to STARTED, height = 143 assert_equal( get_bip9_status(self.nodes[0], 'csv')['status'], 'started') # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0 # using a variety of bits to simulate multiple parallel softforks # 0x20000001 (signalling ready) test_blocks = self.generate_blocks(50, 536870913) # 0x00000004 (signalling not) test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x20000101 (signalling ready) test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20010000 (signalling not) test_blocks = self.generate_blocks(24, 536936448, test_blocks) yield 
TestInstance(test_blocks, sync_every_block=False) # 2 # Failed to advance past STARTED, height = 287 assert_equal( get_bip9_status(self.nodes[0], 'csv')['status'], 'started') # 108 out of 144 signal bit 0 to achieve lock-in # using a variety of bits to simulate multiple parallel softforks # 0x20000001 (signalling ready) test_blocks = self.generate_blocks(58, 536870913) # 0x00000004 (signalling not) test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x20000101 (signalling ready) test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20010000 (signalling not) test_blocks = self.generate_blocks(10, 536936448, test_blocks) yield TestInstance(test_blocks, sync_every_block=False) # 3 # Advanced from STARTED to LOCKED_IN, height = 431 assert_equal( get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') # 140 more version 4 blocks test_blocks = self.generate_blocks(140, 4) yield TestInstance(test_blocks, sync_every_block=False) # 4 # Inputs at height = 572 # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block) # Note we reuse inputs for v1 and v2 txs so must test these separately # 16 normal inputs bip68inputs = [] for i in range(16): bip68inputs.append( self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)) # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be # prepended to spending scriptSig) bip112basicinputs = [] for j in range(2): inputs = [] for i in range(16): inputs.append(self.send_generic_input_tx( self.nodes[0], self.coinbase_blocks)) bip112basicinputs.append(inputs) # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP # (actually will be prepended to spending scriptSig) bip112diverseinputs = [] for j in range(2): inputs = [] for i in range(16): inputs.append(self.send_generic_input_tx( self.nodes[0], self.coinbase_blocks)) bip112diverseinputs.append(inputs) # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to # spending scriptSig) bip112specialinput = self.send_generic_input_tx( self.nodes[0], self.coinbase_blocks) # 1 normal input bip113input = self.send_generic_input_tx( self.nodes[0], self.coinbase_blocks) self.nodes[0].setmocktime(self.last_block_time + 600) inputblockhash = self.nodes[0].generate(1)[ 0] # 1 block generated for inputs to be in chain at height 572 self.nodes[0].setmocktime(0) self.tip = int("0x" + inputblockhash, 0) self.tipheight += 1 self.last_block_time += 600 assert_equal( len(self.nodes[0].getblock(inputblockhash, True)["tx"]), 82 + 1) # 2 more version 4 blocks test_blocks = self.generate_blocks(2, 4) yield TestInstance(test_blocks, sync_every_block=False) # 5 # Not yet advanced to ACTIVE, height = 574 (will activate for block # 576, not 575) assert_equal( get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in') # Test both version 1 and version 2 transactions for all tests # BIP113 test transaction will be modified before each use to put in # appropriate block time bip113tx_v1 = self.create_transaction( self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE bip113tx_v1.nVersion = 1 bip113tx_v2 = self.create_transaction( self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98")) bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE bip113tx_v2.nVersion = 2 # For BIP68 test all 16 relative sequence locktimes bip68txs_v1 = self.create_bip68txs(bip68inputs, 1) bip68txs_v2 = self.create_bip68txs(bip68inputs, 2) # For BIP112 test: # 16 relative sequence locktimes of 10 against 10 OP_CSV 
OP_DROP inputs bip112txs_vary_nSequence_v1 = self.create_bip112txs( bip112basicinputs[0], False, 1) bip112txs_vary_nSequence_v2 = self.create_bip112txs( bip112basicinputs[0], False, 2) # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs bip112txs_vary_nSequence_9_v1 = self.create_bip112txs( bip112basicinputs[1], False, 1, -1) bip112txs_vary_nSequence_9_v2 = self.create_bip112txs( bip112basicinputs[1], False, 2, -1) # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV # OP_DROP inputs bip112txs_vary_OP_CSV_v1 = self.create_bip112txs( bip112diverseinputs[0], True, 1) bip112txs_vary_OP_CSV_v2 = self.create_bip112txs( bip112diverseinputs[0], True, 2) # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV # OP_DROP inputs bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs( bip112diverseinputs[1], True, 1, -1) bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs( bip112diverseinputs[1], True, 2, -1) # -1 OP_CSV OP_DROP input bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1) bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2) # TESTING ### # # Before Soft Forks Activate ### # # All txs should pass # Version 1 txs ### success_txs = [] # add BIP113 tx and -1 CSV tx bip113tx_v1.nLockTime = self.last_block_time - 600 * \ 5 # = MTP of prior block (not <) but < time put on current block bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) success_txs.append(bip113signed1) success_txs.append(bip112tx_special_v1) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v1)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1)) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1)) yield TestInstance([[self.create_test_block(success_txs), True]]) # 6 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Version 2 txs ### success_txs = [] # add BIP113 tx and -1 CSV tx bip113tx_v2.nLockTime = self.last_block_time - 600 * \ 5 # = MTP of prior block (not <) but < time put on current block bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) success_txs.append(bip113signed2) success_txs.append(bip112tx_special_v2) # add BIP 68 txs success_txs.extend(all_rlt_txs(bip68txs_v2)) # add BIP 112 with seq=10 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2)) # try BIP 112 with seq=9 txs success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2)) yield TestInstance([[self.create_test_block(success_txs), True]]) # 7 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # 1 more version 4 block to get us to height 575 so the fork should now # be active for the next block test_blocks = self.generate_blocks(1, 4) yield TestInstance(test_blocks, sync_every_block=False) # 8 assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active') # # After Soft Forks Activate ### # # BIP 113 ### # BIP 113 tests should now fail regardless of version number if # nLockTime isn't satisfied by new rules bip113tx_v1.nLockTime = self.last_block_time - 600 * \ 5 # = MTP of prior block (not <) but < time put on current block bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) bip113tx_v2.nLockTime = self.last_block_time - 600 * \ 5 # = MTP of prior block (not <) but < time 
put on current block bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10 # BIP 113 tests should now pass if the locktime is < MTP bip113tx_v1.nLockTime = self.last_block_time - \ 600 * 5 - 1 # < MTP of prior block bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1) bip113tx_v2.nLockTime = self.last_block_time - \ 600 * 5 - 1 # < MTP of prior block bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2) for bip113tx in [bip113signed1, bip113signed2]: yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Next block height = 580 after 4 blocks of random version test_blocks = self.generate_blocks(4, 1234) yield TestInstance(test_blocks, sync_every_block=False) # 13 # BIP 68 ### # Version 1 txs ### # All still pass success_txs = [] success_txs.extend(all_rlt_txs(bip68txs_v1)) yield TestInstance([[self.create_test_block(success_txs), True]]) # 14 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Version 2 txs ### bip68success_txs = [] # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass for b25 in range(2): for b22 in range(2): for b18 in range(2): bip68success_txs.append(bip68txs_v2[1][b25][b22][b18]) yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # All txs without flag fail as we are at delta height = 8 < 10 and # delta time = 8 * 600 < 10 * 512 bip68timetxs = [] for b25 in range(2): for b18 in range(2): bip68timetxs.append(bip68txs_v2[0][b25][1][b18]) for tx in bip68timetxs: yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19 bip68heighttxs = [] for b25 in range(2): for b18 in range(2): bip68heighttxs.append(bip68txs_v2[0][b25][0][b18]) for tx in bip68heighttxs: yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23 # Advance one block to 581 test_blocks = self.generate_blocks(1, 1234) yield TestInstance(test_blocks, sync_every_block=False) # 24 # Height txs should fail and time txs should now pass 9 * 600 > 10 * # 512 bip68success_txs.extend(bip68timetxs) yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) for tx in bip68heighttxs: yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29 # Advance one block to 582 test_blocks = self.generate_blocks(1, 1234) yield TestInstance(test_blocks, sync_every_block=False) # 30 # All BIP 68 txs should pass bip68success_txs.extend(bip68heighttxs) yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # BIP 112 ### # Version 1 txs ### # -1 OP_CSV tx should fail yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) # 32 # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, # version 1 txs should still pass success_txs = [] for b25 in range(2): for b22 in range(2): for b18 in range(2): success_txs.append( bip112txs_vary_OP_CSV_v1[1][b25][b22][b18]) success_txs.append( bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18]) yield TestInstance([[self.create_test_block(success_txs), True]]) # 33 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, # version 1 txs should now fail fail_txs = [] 
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1)) fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1)) for b25 in range(2): for b22 in range(2): for b18 in range(2): fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18]) fail_txs.append( bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18]) for tx in fail_txs: yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81 # Version 2 txs ### # -1 OP_CSV tx should fail yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) # 82 # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, # version 2 txs should pass (all sequence locks are met) success_txs = [] for b25 in range(2): for b22 in range(2): for b18 in range(2): success_txs.append(bip112txs_vary_OP_CSV_v2[ 1][b25][b22][b18]) # 8/16 of vary_OP_CSV success_txs.append(bip112txs_vary_OP_CSV_9_v2[ 1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9 yield TestInstance([[self.create_test_block(success_txs), True]]) # 83 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ## # All txs with nSequence 9 should fail either due to earlier mismatch # or failing the CSV check fail_txs = [] fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9 for b25 in range(2): for b22 in range(2): for b18 in range(2): fail_txs.append(bip112txs_vary_OP_CSV_9_v2[ 0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9 for tx in fail_txs: yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107 # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail fail_txs = [] for b25 in range(2): for b22 in range(2): for b18 in range(2): fail_txs.append(bip112txs_vary_nSequence_v2[ 1][b25][b22][b18]) # 8/16 of vary_nSequence for tx in fail_txs: yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115 # If sequencelock types mismatch, tx should fail fail_txs = [] for b25 in range(2): for b18 in range(2): fail_txs.append(bip112txs_vary_nSequence_v2[ 0][b25][1][b18]) # 12/16 of vary_nSequence fail_txs.append(bip112txs_vary_OP_CSV_v2[ 0][b25][1][b18]) # 12/16 of vary_OP_CSV for tx in fail_txs: yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123 # Remaining txs should pass, just test masking works properly success_txs = [] for b25 in range(2): for b18 in range(2): success_txs.append(bip112txs_vary_nSequence_v2[ 0][b25][0][b18]) # 16/16 of vary_nSequence success_txs.append(bip112txs_vary_OP_CSV_v2[ 0][b25][0][b18]) # 16/16 of vary_OP_CSV yield TestInstance([[self.create_test_block(success_txs), True]]) # 124 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Additional test, of checking that comparison of two time types works # properly time_txs = [] for b25 in range(2): for b18 in range(2): tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18] tx.vin[0].nSequence = base_relative_locktime | seq_type_flag signtx = self.sign_transaction(self.nodes[0], tx) time_txs.append(signtx) yield TestInstance([[self.create_test_block(time_txs), True]]) # 125 self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Missing aspects of test # Testing empty stack fails if __name__ == '__main__': BIP68_112_113Test().main() diff --git a/qa/rpc-tests/bip68-sequence.py b/qa/rpc-tests/bip68-sequence.py index 4c6c86b72..1400964f5 100755 --- a/qa/rpc-tests/bip68-sequence.py +++ b/qa/rpc-tests/bip68-sequence.py @@ -1,471 +1,463 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under 
the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test BIP68 implementation # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.script import * from test_framework.mininode import * from test_framework.blocktools import * SEQUENCE_LOCKTIME_DISABLE_FLAG = (1 << 31) SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22) # this means use time (0 means height) SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift SEQUENCE_LOCKTIME_MASK = 0x0000ffff # RPC error for non-BIP68 final transactions NOT_FINAL_ERROR = "64: non-BIP68-final" class BIP68Test(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = False + self.extra_args = [[], ["-acceptnonstdtxn=0"]] - def setup_network(self): - self.nodes = [] - self.nodes.append( - start_node(0, self.options.tmpdir, ["-blockprioritysize=0"])) - self.nodes.append( - start_node(1, self.options.tmpdir, ["-blockprioritysize=0", - "-acceptnonstdtxn=0"])) - self.is_network_split = False + def run_test(self): self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] - connect_nodes(self.nodes[0], 1) - def run_test(self): # Generate some coins self.nodes[0].generate(110) self.log.info("Running test disable flag") self.test_disable_flag() self.log.info("Running test sequence-lock-confirmed-inputs") self.test_sequence_lock_confirmed_inputs() self.log.info("Running test sequence-lock-unconfirmed-inputs") self.test_sequence_lock_unconfirmed_inputs() self.log.info( "Running test BIP68 not consensus before versionbits activation") self.test_bip68_not_consensus() self.log.info("Verifying nVersion=2 transactions aren't standard") self.test_version2_relay(before_activation=True) self.log.info("Activating BIP68 (and 112/113)") self.activateCSV() self.log.info("Verifying nVersion=2 transactions are now standard") self.test_version2_relay(before_activation=False) self.log.info("Passed") # Test that BIP68 is not in effect if tx version is 1, or if # the first sequence bit is set. def test_disable_flag(self): # Create some unconfirmed inputs new_addr = self.nodes[0].getnewaddress() self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC utxos = self.nodes[0].listunspent(0, 0) assert(len(utxos) > 0) utxo = utxos[0] tx1 = CTransaction() value = int(satoshi_round(utxo["amount"] - self.relayfee) * COIN) # Check that the disable flag disables relative locktime. # If sequence locks were used, this would require 1 block for the # input to mature. sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1 tx1.vin = [ CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] tx1.vout = [CTxOut(value, CScript([b'a']))] tx1_signed = self.nodes[0].signrawtransaction( ToHex(tx1), None, None, "ALL|FORKID")["hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) # This transaction will enable sequence-locks, so this transaction should # fail tx2 = CTransaction() tx2.nVersion = 2 sequence_value = sequence_value & 0x7fffffff tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)] tx2.vout = [CTxOut(int(value - self.relayfee * COIN), CScript([b'a']))] tx2.rehash() try: self.nodes[0].sendrawtransaction(ToHex(tx2)) except JSONRPCException as exp: assert_equal(exp.error["message"], NOT_FINAL_ERROR) else: assert(False) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. 
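# Editor's sketch (illustrative, not part of the test): the two escape hatches
# test_disable_flag exercises. A BIP68 relative lock is consensus-enforced only
# when the tx is nVersion >= 2 AND the input's nSequence does not set the
# disable flag. The helper name is an assumption; the constant is restated
# locally so the sketch is self-contained.
def bip68_applies(tx_version, n_sequence):
    SEQUENCE_LOCKTIME_DISABLE_FLAG = 1 << 31
    return tx_version >= 2 and not (n_sequence & SEQUENCE_LOCKTIME_DISABLE_FLAG)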
tx2.nVersion = 1 self.nodes[0].sendrawtransaction(ToHex(tx2)) # Calculate the median time past of a prior block ("confirmations" before # the current tip). def get_median_time_past(self, confirmations): block_hash = self.nodes[0].getblockhash( self.nodes[0].getblockcount() - confirmations) return self.nodes[0].getblockheader(block_hash)["mediantime"] # Test that sequence locks are respected for transactions spending # confirmed inputs. def test_sequence_lock_confirmed_inputs(self): # Create lots of confirmed utxos, and use them to generate lots of random # transactions. max_outputs = 50 addresses = [] while len(addresses) < max_outputs: addresses.append(self.nodes[0].getnewaddress()) while len(self.nodes[0].listunspent()) < 200: import random random.shuffle(addresses) num_outputs = random.randint(1, max_outputs) outputs = {} for i in range(num_outputs): outputs[addresses[i]] = random.randint(1, 20) * 0.01 self.nodes[0].sendmany("", outputs) self.nodes[0].generate(1) utxos = self.nodes[0].listunspent() # Try creating a lot of random transactions. # Each time, choose a random number of inputs, and randomly set # some of those inputs to be sequence locked (and randomly choose # between height/time locking). Small random chance of making the locks # all pass. for i in range(400): # Randomly choose up to 10 inputs num_inputs = random.randint(1, 10) random.shuffle(utxos) # Track whether any sequence locks used should fail should_pass = True # Track whether this transaction was built with sequence locks using_sequence_locks = False tx = CTransaction() tx.nVersion = 2 value = 0 for j in range(num_inputs): sequence_value = 0xfffffffe # this disables sequence locks # 50% chance we enable sequence locks if random.randint(0, 1): using_sequence_locks = True # 10% of the time, make the input sequence value pass input_will_pass = (random.randint(1, 10) == 1) sequence_value = utxos[j]["confirmations"] if not input_will_pass: sequence_value += 1 should_pass = False # Figure out what the median-time-past was for the confirmed input # Note that if an input has N confirmations, we're going back N blocks # from the tip so that we're looking up MTP of the block # PRIOR to the one the input appears in, as per the BIP68 # spec. 
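# Editor's note (worked example, not part of the test): with
# SEQUENCE_LOCKTIME_GRANULARITY = 9, time-based locks are counted in
# 512-second units (1 << 9 = 512). For an input whose prior-block MTP is
# orig_time and a tip MTP of cur_time:
#     elapsed       = cur_time - orig_time
#     passing_value = elapsed >> SEQUENCE_LOCKTIME_GRANULARITY        # largest value that still passes
#     failing_value = (elapsed >> SEQUENCE_LOCKTIME_GRANULARITY) + 1  # smallest value that fails
# which is exactly how the loop below picks sequence_value.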
orig_time = self.get_median_time_past( utxos[j]["confirmations"]) cur_time = self.get_median_time_past(0) # MTP of the tip # can only timelock this input if it's not too old -- # otherwise use height can_time_lock = True if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK: can_time_lock = False # if time-lockable, then 50% chance we make this a time # lock if random.randint(0, 1) and can_time_lock: # Find first time-lock value that fails, or latest one # that succeeds time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY if input_will_pass and time_delta > cur_time - orig_time: sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) elif (not input_will_pass and time_delta <= cur_time - orig_time): sequence_value = ( (cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) + 1 sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx.vin.append( CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) value += utxos[j]["amount"] * COIN # Overestimate the size of the tx - signatures should be less than # 120 bytes, and leave 50 for the output tx_size = len(ToHex(tx)) // 2 + 120 * num_inputs + 50 tx.vout.append( CTxOut(int(value - self.relayfee * tx_size * COIN / 1000), CScript([b'a']))) rawtx = self.nodes[0].signrawtransaction( ToHex(tx), None, None, "ALL|FORKID")["hex"] try: self.nodes[0].sendrawtransaction(rawtx) except JSONRPCException as exp: assert(not should_pass and using_sequence_locks) assert_equal(exp.error["message"], NOT_FINAL_ERROR) else: assert(should_pass or not using_sequence_locks) # Recalculate utxos if we successfully sent the transaction utxos = self.nodes[0].listunspent() # Test that sequence locks on unconfirmed inputs must have nSequence # height or time of 0 to be accepted. # Then test that BIP68-invalid transactions are removed from the mempool # after a reorg. def test_sequence_lock_unconfirmed_inputs(self): # Store height so we can easily reset the chain at the end of the test cur_height = self.nodes[0].getblockcount() # Create a mempool tx. txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Anyone-can-spend mempool tx. # Sequence lock of 0 should pass. tx2 = CTransaction() tx2.nVersion = 2 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] tx2_raw = self.nodes[0].signrawtransaction( ToHex(tx2), None, None, "ALL|FORKID")["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(tx2_raw) # Create a spend of the 0th output of orig_tx with a sequence lock # of 1, and test what happens when submitting. 
# orig_tx.vout[0] must be an anyone-can-spend output def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock): sequence_value = 1 if not use_height_lock: sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG tx = CTransaction() tx.nVersion = 2 tx.vin = [ CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)] tx.vout = [ CTxOut(int(orig_tx.vout[0].nValue - relayfee * COIN), CScript([b'a']))] tx.rehash() try: node.sendrawtransaction(ToHex(tx)) except JSONRPCException as exp: assert_equal(exp.error["message"], NOT_FINAL_ERROR) assert(orig_tx.hash in node.getrawmempool()) else: # orig_tx must not be in mempool assert(orig_tx.hash not in node.getrawmempool()) return tx test_nonzero_locks( tx2, self.nodes[0], self.relayfee, use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], self.relayfee, use_height_lock=False) # Now mine some blocks, but make sure tx2 doesn't get mined. # Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction( tx2.hash, -1e15, int(-self.relayfee * COIN)) cur_time = int(time.time()) for i in range(10): self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) cur_time += 600 assert(tx2.hash in self.nodes[0].getrawmempool()) test_nonzero_locks( tx2, self.nodes[0], self.relayfee, use_height_lock=True) test_nonzero_locks( tx2, self.nodes[0], self.relayfee, use_height_lock=False) # Mine tx2, and then try again self.nodes[0].prioritisetransaction( tx2.hash, 1e15, int(self.relayfee * COIN)) # Advance the time on the node so that we can test timelocks self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) assert(tx2.hash not in self.nodes[0].getrawmempool()) # Now that tx2 is not in the mempool, a sequence locked spend should # succeed tx3 = test_nonzero_locks( tx2, self.nodes[0], self.relayfee, use_height_lock=False) assert(tx3.hash in self.nodes[0].getrawmempool()) self.nodes[0].generate(1) assert(tx3.hash not in self.nodes[0].getrawmempool()) # One more test, this time using height locks tx4 = test_nonzero_locks( tx3, self.nodes[0], self.relayfee, use_height_lock=True) assert(tx4.hash in self.nodes[0].getrawmempool()) # Now try combining confirmed and unconfirmed inputs tx5 = test_nonzero_locks( tx4, self.nodes[0], self.relayfee, use_height_lock=True) assert(tx5.hash not in self.nodes[0].getrawmempool()) utxos = self.nodes[0].listunspent() tx5.vin.append( CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) tx5.vout[0].nValue += int(utxos[0]["amount"] * COIN) raw_tx5 = self.nodes[0].signrawtransaction( ToHex(tx5), None, None, "ALL|FORKID")["hex"] try: self.nodes[0].sendrawtransaction(raw_tx5) except JSONRPCException as exp: assert_equal(exp.error["message"], NOT_FINAL_ERROR) else: assert(False) # Test mempool-BIP68 consistency after reorg # # State of the transactions in the last blocks: # ... -> [ tx2 ] -> [ tx3 ] # tip-1 tip # And currently tx4 is in the mempool. # # If we invalidate the tip, tx3 should get added to the mempool, causing # tx4 to be removed (fails sequence-lock). self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) assert(tx4.hash not in self.nodes[0].getrawmempool()) assert(tx3.hash in self.nodes[0].getrawmempool()) # Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in # diagram above). # This would cause tx2 to be added back to the mempool, which in turn causes # tx3 to be removed. 
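# Editor's note (not part of the test): the eviction above follows from how
# BIP68 is re-evaluated against the mempool. tx3 spends tx2 with a nonzero
# relative lock; once the reorg returns tx2 to the mempool, tx2's output has
# 0 confirmations again, so tx3's lock of 1 can no longer be satisfied and
# tx3 must be removed.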
tip = int(self.nodes[0].getblockhash( self.nodes[0].getblockcount() - 1), 16) height = self.nodes[0].getblockcount() for i in range(2): block = create_block(tip, create_coinbase(height), cur_time) block.nVersion = 3 block.rehash() block.solve() tip = block.sha256 height += 1 self.nodes[0].submitblock(ToHex(block)) cur_time += 1 mempool = self.nodes[0].getrawmempool() assert(tx3.hash not in mempool) assert(tx2.hash in mempool) # Reset the chain and get rid of the mocktimed-blocks self.nodes[0].setmocktime(0) self.nodes[0].invalidateblock( self.nodes[0].getblockhash(cur_height + 1)) self.nodes[0].generate(10) # Make sure that BIP68 isn't being used to validate blocks, prior to # versionbits activation. If more blocks are mined prior to this test # being run, then it's possible the test has activated the soft fork, and # this test should be moved to run earlier, or deleted. def test_bip68_not_consensus(self): assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active') txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Make an anyone-can-spend transaction tx2 = CTransaction() tx2.nVersion = 1 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [ CTxOut(int(tx1.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] # sign tx2 tx2_raw = self.nodes[0].signrawtransaction( ToHex(tx2), None, None, "ALL|FORKID")["hex"] tx2 = FromHex(tx2, tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(ToHex(tx2)) # Now make an invalid spend of tx2 according to BIP68 sequence_value = 100 # 100 block relative locktime tx3 = CTransaction() tx3.nVersion = 2 tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)] tx3.vout = [ CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), CScript([b'a']))] tx3.rehash() try: self.nodes[0].sendrawtransaction(ToHex(tx3)) except JSONRPCException as exp: assert_equal(exp.error["message"], NOT_FINAL_ERROR) else: assert(False) # make a block that violates bip68; ensure that the tip updates tip = int(self.nodes[0].getbestblockhash(), 16) block = create_block( tip, create_coinbase(self.nodes[0].getblockcount() + 1)) block.nVersion = 3 block.vtx.extend([tx1, tx2, tx3]) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.nodes[0].submitblock(ToHex(block)) assert_equal(self.nodes[0].getbestblockhash(), block.hash) def activateCSV(self): # activation should happen at block height 432 (3 periods) min_activation_height = 432 height = self.nodes[0].getblockcount() assert(height < 432) self.nodes[0].generate(432 - height) assert(get_bip9_status(self.nodes[0], 'csv')['status'] == 'active') sync_blocks(self.nodes) # Use self.nodes[1] to test standardness relay policy def test_version2_relay(self, before_activation): inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.0} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] tx = FromHex(CTransaction(), rawtxfund) tx.nVersion = 2 tx_signed = self.nodes[1].signrawtransaction( ToHex(tx), None, None, "ALL|FORKID")["hex"] try: tx_id = self.nodes[1].sendrawtransaction(tx_signed) assert(before_activation == False) except: assert(before_activation) if __name__ == '__main__': BIP68Test().main() diff --git a/qa/rpc-tests/bip9-softforks.py b/qa/rpc-tests/bip9-softforks.py index 986892402..cbe02ebb2 100755 --- a/qa/rpc-tests/bip9-softforks.py +++ b/qa/rpc-tests/bip9-softforks.py @@ -1,262 +1,258 @@ #!/usr/bin/env 
python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.blockstore import BlockStore from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.mininode import CTransaction, NetworkThread from test_framework.blocktools import create_coinbase, create_block from test_framework.comptool import TestInstance, TestManager from test_framework.script import CScript, OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP from io import BytesIO import time import itertools ''' This test is meant to exercise BIP forks Connect to a single node. regtest lock-in with 108/144 block signalling activation after a further 144 blocks mine 2 blocks and save coinbases for later use mine 141 blocks to transition from DEFINED to STARTED mine 100 blocks signalling readiness and 44 not in order to fail to change state this period mine 108 blocks signalling readiness and 36 blocks not signalling readiness (STARTED->LOCKED_IN) mine a further 143 blocks (LOCKED_IN) test that enforcement has not triggered (which triggers ACTIVE) test that enforcement has triggered ''' class BIP9SoftForksTest(ComparisonTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 - - def setup_network(self): - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-whitelist=127.0.0.1']], - binary=[self.options.testbinary]) + self.extra_args = [['-whitelist=127.0.0.1']] def run_test(self): self.test = TestManager(self, self.options.tmpdir) self.test.add_all_connections(self.nodes) NetworkThread().start() # Start up network handling in another thread self.test.run() def create_transaction(self, node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{"txid": from_txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) tx = CTransaction() f = BytesIO(hex_str_to_bytes(rawtx)) tx.deserialize(f) tx.nVersion = 2 return tx def sign_transaction(self, node, tx): signresult = node.signrawtransaction( bytes_to_hex_str(tx.serialize()), None, None, "ALL|FORKID") tx = CTransaction() f = BytesIO(hex_str_to_bytes(signresult['hex'])) tx.deserialize(f) return tx def generate_blocks(self, number, version, test_blocks=[]): for i in range(number): block = create_block( self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = version block.rehash() block.solve() test_blocks.append([block, True]) self.last_block_time += 1 self.tip = block.sha256 self.height += 1 return test_blocks def get_bip9_status(self, key): info = self.nodes[0].getblockchaininfo() return info['bip9_softforks'][key] def test_BIP(self, bipName, activated_version, invalidate, invalidatePostSignature, bitno): assert_equal(self.get_bip9_status(bipName)['status'], 'defined') assert_equal(self.get_bip9_status(bipName)['since'], 0) # generate some coins for later self.coinbase_blocks = self.nodes[0].generate(2) self.height = 3 # height of the next block to build self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.nodeaddress = self.nodes[0].getnewaddress() self.last_block_time = int(time.time()) assert_equal(self.get_bip9_status(bipName)['status'], 'defined') assert_equal(self.get_bip9_status(bipName)['since'], 0) tmpl = self.nodes[0].getblocktemplate({}) assert(bipName not in tmpl['rules']) assert(bipName not in tmpl['vbavailable'])
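# Editor's sketch (illustrative, not part of the test): how the generated block
# versions signal under BIP9. A block signals for a deployment when its top
# three version bits are 001 (the 0x20000000 pattern) and the deployment's bit
# is set; the plain version 4 blocks used in this test therefore never signal.
# The helper name is an assumption for this sketch.
def signals(block_version, bitno):
    TOP_MASK, TOP_BITS = 0xe0000000, 0x20000000
    return (block_version & TOP_MASK) == TOP_BITS and bool(block_version & (1 << bitno))

# e.g. signals(0x20000001, 0) -> True; signals(4, 0) -> False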
assert_equal(tmpl['vbrequired'], 0) assert_equal(tmpl['version'], 0x20000000) # Test 1 # Advance from DEFINED to STARTED test_blocks = self.generate_blocks(141, 4) yield TestInstance(test_blocks, sync_every_block=False) assert_equal(self.get_bip9_status(bipName)['status'], 'started') assert_equal(self.get_bip9_status(bipName)['since'], 144) tmpl = self.nodes[0].getblocktemplate({}) assert(bipName not in tmpl['rules']) assert_equal(tmpl['vbavailable'][bipName], bitno) assert_equal(tmpl['vbrequired'], 0) assert(tmpl['version'] & activated_version) # Test 2 # Fail to achieve LOCKED_IN 100 out of 144 signal bit 1 # using a variety of bits to simulate multiple parallel softforks test_blocks = self.generate_blocks( 50, activated_version) # 0x20000001 (signalling ready) test_blocks = self.generate_blocks( 20, 4, test_blocks) # 0x00000004 (signalling not) test_blocks = self.generate_blocks( 50, activated_version, test_blocks) # 0x20000101 (signalling ready) test_blocks = self.generate_blocks( 24, 4, test_blocks) # 0x20010000 (signalling not) yield TestInstance(test_blocks, sync_every_block=False) assert_equal(self.get_bip9_status(bipName)['status'], 'started') assert_equal(self.get_bip9_status(bipName)['since'], 144) tmpl = self.nodes[0].getblocktemplate({}) assert(bipName not in tmpl['rules']) assert_equal(tmpl['vbavailable'][bipName], bitno) assert_equal(tmpl['vbrequired'], 0) assert(tmpl['version'] & activated_version) # Test 3 # 108 out of 144 signal bit 1 to achieve LOCKED_IN # using a variety of bits to simulate multiple parallel softforks # 0x20000001 (signalling ready) test_blocks = self.generate_blocks(58, activated_version) # 0x00000004 (signalling not) test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x20000101 (signalling ready) test_blocks = self.generate_blocks(50, activated_version, test_blocks) # 0x20010000 (signalling not) test_blocks = self.generate_blocks(10, 4, test_blocks) yield TestInstance(test_blocks, sync_every_block=False) assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') assert_equal(self.get_bip9_status(bipName)['since'], 432) tmpl = self.nodes[0].getblocktemplate({}) assert(bipName not in tmpl['rules']) # Test 4 # 143 more version 536870913 blocks (waiting period-1) test_blocks = self.generate_blocks(143, 4) yield TestInstance(test_blocks, sync_every_block=False) assert_equal(self.get_bip9_status(bipName)['status'], 'locked_in') assert_equal(self.get_bip9_status(bipName)['since'], 432) tmpl = self.nodes[0].getblocktemplate({}) assert(bipName not in tmpl['rules']) # Test 5 # Check that the new rule is enforced spendtx = self.create_transaction(self.nodes[0], self.coinbase_blocks[0], self.nodeaddress, 1.0) invalidate(spendtx) spendtx = self.sign_transaction(self.nodes[0], spendtx) spendtx.rehash() invalidatePostSignature(spendtx) spendtx.rehash() block = create_block( self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = activated_version block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.last_block_time += 1 self.tip = block.sha256 self.height += 1 yield TestInstance([[block, True]]) assert_equal(self.get_bip9_status(bipName)['status'], 'active') assert_equal(self.get_bip9_status(bipName)['since'], 576) tmpl = self.nodes[0].getblocktemplate({}) assert(bipName in tmpl['rules']) assert(bipName not in tmpl['vbavailable']) assert_equal(tmpl['vbrequired'], 0) assert(not (tmpl['version'] & (1 << bitno))) # Test 6 # Check that the new sequence lock rules are enforced 
spendtx = self.create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) invalidate(spendtx) spendtx = self.sign_transaction(self.nodes[0], spendtx) spendtx.rehash() invalidatePostSignature(spendtx) spendtx.rehash() block = create_block( self.tip, create_coinbase(self.height), self.last_block_time + 1) block.nVersion = 5 block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.last_block_time += 1 yield TestInstance([[block, False]]) # Restart all self.test.block_store.close() stop_nodes(self.nodes) shutil.rmtree(self.options.tmpdir) self.setup_chain() self.setup_network() self.test.block_store = BlockStore(self.options.tmpdir) self.test.clear_all_connections() self.test.add_all_connections(self.nodes) NetworkThread().start() # Start up network handling in another thread def get_tests(self): for test in itertools.chain( self.test_BIP( 'csv', 0x20000001, self.sequence_lock_invalidate, self.donothing, 0), self.test_BIP( 'csv', 0x20000001, self.mtp_invalidate, self.donothing, 0), self.test_BIP( 'csv', 0x20000001, self.donothing, self.csv_invalidate, 0) ): yield test def donothing(self, tx): return def csv_invalidate(self, tx): '''Modify the signature in vin 0 of the tx to fail CSV Prepends -1 CSV DROP in the scriptSig itself. ''' tx.vin[0].scriptSig = CScript([OP_1NEGATE, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(tx.vin[0].scriptSig))) def sequence_lock_invalidate(self, tx): '''Modify the nSequence to make it fail once the sequence lock rule is activated (high timespan) ''' tx.vin[0].nSequence = 0x00FFFFFF tx.nLockTime = 0 def mtp_invalidate(self, tx): '''Modify the nLockTime to make it fail once the MTP rule is activated ''' # Disable Sequence lock, Activate nLockTime tx.vin[0].nSequence = 0x90FFFFFF tx.nLockTime = self.last_block_time if __name__ == '__main__': BIP9SoftForksTest().main() diff --git a/qa/rpc-tests/bipdersig-p2p.py b/qa/rpc-tests/bipdersig-p2p.py index 4b7b9f828..8415a0c55 100755 --- a/qa/rpc-tests/bipdersig-p2p.py +++ b/qa/rpc-tests/bipdersig-p2p.py @@ -1,180 +1,173 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.mininode import CTransaction, NetworkThread from test_framework.blocktools import create_coinbase, create_block from test_framework.comptool import TestInstance, TestManager from test_framework.script import CScript from io import BytesIO import time # A canonical signature consists of: # <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype> def unDERify(tx): ''' Make the signature in vin 0 of a tx non-DER-compliant, by adding padding after the S-value. ''' scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): newscript.append(i[0:-1] + b'\0' + i[-1:]) else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) ''' This test is meant to exercise BIP66 (DER SIG). Connect to a single node. Mine 2 (version 2) blocks (save the coinbases for later). Generate 298 more version 2 blocks, verify the node accepts. Mine 749 version 3 blocks, verify the node accepts. Mine 199 new version blocks. Mine 1 old-version block. Mine 1 new version block. Check that the new DERSIG rules are enforced on the 950th version 3 block.
Mine 1 old version block, see that the node rejects. ''' class BIP66Test(ComparisonTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 - def setup_network(self): - # Must set the blockversion for this test - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-whitelist=127.0.0.1', - '-blockversion=2']], - binary=[self.options.testbinary]) - def run_test(self): test = TestManager(self, self.options.tmpdir) test.add_all_connections(self.nodes) # Start up network handling in another thread NetworkThread().start() test.run() def create_transaction(self, node, coinbase, to_address, amount): from_txid = node.getblock(coinbase)['tx'][0] inputs = [{"txid": from_txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID") tx = CTransaction() f = BytesIO(hex_str_to_bytes(signresult['hex'])) tx.deserialize(f) return tx def get_tests(self): self.coinbase_blocks = self.nodes[0].generate(2) height = 3 # height of the next block to build self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.nodeaddress = self.nodes[0].getnewaddress() self.last_block_time = int(time.time()) ''' 298 more version 2 blocks ''' test_blocks = [] for i in range(298): block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 2 block.rehash() block.solve() test_blocks.append([block, True]) self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance(test_blocks, sync_every_block=False) ''' Mine 749 version 3 blocks ''' test_blocks = [] for i in range(749): block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 3 block.rehash() block.solve() test_blocks.append([block, True]) self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance(test_blocks, sync_every_block=False) ''' Mine 199 new version blocks on last valid tip ''' test_blocks = [] for i in range(199): block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 3 block.rehash() block.solve() test_blocks.append([block, True]) self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance(test_blocks, sync_every_block=False) ''' Mine 1 old version block ''' block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 2 block.rehash() block.solve() self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance([[block, True]]) ''' Mine 1 new version block ''' block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 3 block.rehash() block.solve() self.last_block_time += 1 self.tip = block.sha256 height += 1 yield TestInstance([[block, True]]) ''' Check that the new DERSIG rules are enforced in the 950th version 3 block.
''' spendtx = self.create_transaction(self.nodes[0], self.coinbase_blocks[1], self.nodeaddress, 1.0) unDERify(spendtx) spendtx.rehash() block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 3 block.vtx.append(spendtx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() self.last_block_time += 1 yield TestInstance([[block, False]]) ''' Mine 1 old version block, should be invalid ''' block = create_block( self.tip, create_coinbase(height), self.last_block_time + 1) block.nVersion = 2 block.rehash() block.solve() self.last_block_time += 1 yield TestInstance([[block, False]]) if __name__ == '__main__': BIP66Test().main() diff --git a/qa/rpc-tests/bipdersig.py b/qa/rpc-tests/bipdersig.py index 4a6b194d8..941106093 100755 --- a/qa/rpc-tests/bipdersig.py +++ b/qa/rpc-tests/bipdersig.py @@ -1,98 +1,93 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test the BIP66 changeover logic # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class BIP66Test(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 3 self.setup_clean_chain = False + self.extra_args = [[], ["-blockversion=2"], ["-blockversion=3"]] def setup_network(self): - self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, [])) - self.nodes.append( - start_node(1, self.options.tmpdir, ["-blockversion=2"])) - self.nodes.append( - start_node(2, self.options.tmpdir, ["-blockversion=3"])) + self.setup_nodes() connect_nodes(self.nodes[1], 0) connect_nodes(self.nodes[2], 0) - self.is_network_split = False self.sync_all() def run_test(self): cnt = self.nodes[0].getblockcount() # Mine some old-version blocks self.nodes[1].generate(100) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 100): raise AssertionError("Failed to mine 100 version=2 blocks") # Mine 750 new-version blocks for i in range(15): self.nodes[2].generate(50) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 850): raise AssertionError("Failed to mine 750 version=3 blocks") # TODO: check that new DERSIG rules are not enforced # Mine 1 new-version block self.nodes[2].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 851): raise AssertionError("Failed to mine a version=3 block") # TODO: check that new DERSIG rules are enforced # Mine 198 new-version blocks for i in range(2): self.nodes[2].generate(99) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1049): raise AssertionError("Failed to mine 198 version=3 blocks") # Mine 1 old-version block self.nodes[1].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1050): raise AssertionError( "Failed to mine a version=2 block after 949 version=3 blocks") # Mine 1 new-version block self.nodes[2].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1051): raise AssertionError("Failed to mine a version=3 block") # Mine 1 old-version block try: self.nodes[1].generate(1) raise AssertionError( "Succeeded in mining a version=2 block after 950 version=3 blocks") except JSONRPCException: pass self.sync_all() if (self.nodes[0].getblockcount() != cnt + 1051): raise AssertionError( "Accepted a version=2 block after 950 version=3 blocks") # Mine 1 new-version block self.nodes[2].generate(1) self.sync_all() if (self.nodes[0].getblockcount() != cnt
+ 1052): raise AssertionError("Failed to mine a version=3 block") if __name__ == '__main__': BIP66Test().main() diff --git a/qa/rpc-tests/blockchain.py b/qa/rpc-tests/blockchain.py index a244e7dc6..57724f043 100755 --- a/qa/rpc-tests/blockchain.py +++ b/qa/rpc-tests/blockchain.py @@ -1,90 +1,84 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test RPC calls related to blockchain state. Tests correspond to code in # rpc/blockchain.cpp. # from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.authproxy import JSONRPCException from test_framework.util import ( assert_equal, assert_raises, assert_is_hex_string, assert_is_hash_string, start_nodes, connect_nodes_bi, ) class BlockchainTest(BitcoinTestFramework): """ Test blockchain-related RPC calls: - gettxoutsetinfo - verifychain """ def __init__(self): super().__init__() self.setup_clean_chain = False - self.num_nodes = 2 - - def setup_network(self, split=False): - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) - connect_nodes_bi(self.nodes, 0, 1) - self.is_network_split = False - self.sync_all() + self.num_nodes = 1 def run_test(self): self._test_gettxoutsetinfo() self._test_getblockheader() self.nodes[0].verifychain(4, 0) def _test_gettxoutsetinfo(self): node = self.nodes[0] res = node.gettxoutsetinfo() assert_equal(res['total_amount'], Decimal('8725.00000000')) assert_equal(res['transactions'], 200) assert_equal(res['height'], 200) assert_equal(res['txouts'], 200) assert_equal(res['bytes_serialized'], 13924), assert_equal(len(res['bestblock']), 64) assert_equal(len(res['hash_serialized']), 64) def _test_getblockheader(self): node = self.nodes[0] assert_raises( JSONRPCException, lambda: node.getblockheader('nonsense')) besthash = node.getbestblockhash() secondbesthash = node.getblockhash(199) header = node.getblockheader(besthash) assert_equal(header['hash'], besthash) assert_equal(header['height'], 200) assert_equal(header['confirmations'], 1) assert_equal(header['previousblockhash'], secondbesthash) assert_is_hex_string(header['chainwork']) assert_is_hash_string(header['hash']) assert_is_hash_string(header['previousblockhash']) assert_is_hash_string(header['merkleroot']) assert_is_hash_string(header['bits'], length=None) assert isinstance(header['time'], int) assert isinstance(header['mediantime'], int) assert isinstance(header['nonce'], int) assert isinstance(header['version'], int) assert isinstance(int(header['versionHex'], 16), int) assert isinstance(header['difficulty'], Decimal) if __name__ == '__main__': BlockchainTest().main() diff --git a/qa/rpc-tests/decodescript.py b/qa/rpc-tests/decodescript.py index b516c150f..b68417f55 100755 --- a/qa/rpc-tests/decodescript.py +++ b/qa/rpc-tests/decodescript.py @@ -1,229 +1,225 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import * from io import BytesIO class DecodeScriptTest(BitcoinTestFramework): """Tests decoding scripts via RPC command "decodescript".""" def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 - def setup_network(self, split=False): - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) - self.is_network_split = False - def decodescript_script_sig(self): signature = '304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' push_signature = '48' + signature public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' push_public_key = '21' + public_key # below are test cases for all of the standard transaction types # 1) P2PK scriptSig # the scriptSig of a public key scriptPubKey simply pushes a signature # onto the stack rpc_result = self.nodes[0].decodescript(push_signature) assert_equal(signature, rpc_result['asm']) # 2) P2PKH scriptSig rpc_result = self.nodes[0].decodescript( push_signature + push_public_key) assert_equal(signature + ' ' + public_key, rpc_result['asm']) # 3) multisig scriptSig # this also tests the leading portion of a P2SH multisig scriptSig # OP_0 rpc_result = self.nodes[0].decodescript( '00' + push_signature + push_signature) assert_equal('0 ' + signature + ' ' + signature, rpc_result['asm']) # 4) P2SH scriptSig # an empty P2SH redeemScript is valid and makes for a very simple test case. # thus, such a spending scriptSig would just need to pass the outer redeemScript # hash test and leave true on the top of the stack. rpc_result = self.nodes[0].decodescript('5100') assert_equal('1 0', rpc_result['asm']) # 5) null data scriptSig - no such thing because null data scripts can not be spent. # thus, no test case for that standard transaction type is here. def decodescript_script_pub_key(self): public_key = '03b0da749730dc9b4b1f4a14d6902877a92541f5368778853d9c4a0cb7802dcfb2' push_public_key = '21' + public_key public_key_hash = '11695b6cd891484c2d49ec5aa738ec2b2f897777' push_public_key_hash = '14' + public_key_hash # below are test cases for all of the standard transaction types # 1) P2PK scriptPubKey # OP_CHECKSIG rpc_result = self.nodes[0].decodescript(push_public_key + 'ac') assert_equal(public_key + ' OP_CHECKSIG', rpc_result['asm']) # 2) P2PKH scriptPubKey # OP_DUP OP_HASH160 OP_EQUALVERIFY OP_CHECKSIG rpc_result = self.nodes[0].decodescript( '76a9' + push_public_key_hash + '88ac') assert_equal('OP_DUP OP_HASH160 ' + public_key_hash + ' OP_EQUALVERIFY OP_CHECKSIG', rpc_result['asm']) # 3) multisig scriptPubKey # OP_CHECKMULTISIG # just imagine that the pub keys used below are different. # for our purposes here it does not matter that they are the same even # though it is unrealistic. rpc_result = self.nodes[0].decodescript( '52' + push_public_key + push_public_key + push_public_key + '53ae') assert_equal('2 ' + public_key + ' ' + public_key + ' ' + public_key + ' 3 OP_CHECKMULTISIG', rpc_result['asm']) # 4) P2SH scriptPubKey # OP_HASH160 OP_EQUAL. # push_public_key_hash here should actually be the hash of a redeem script. # but this works the same for purposes of this test. 
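# Editor's note (worked example): byte-level breakdown of the P2SH script
# assembled below, 'a9' + '14' + <20-byte hash> + '87':
#   a9 = OP_HASH160, 14 = push 20 bytes, 87 = OP_EQUAL
# and of the P2PKH script above, '76a9' + push + '88ac':
#   76 = OP_DUP, a9 = OP_HASH160, 88 = OP_EQUALVERIFY, ac = OP_CHECKSIG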
rpc_result = self.nodes[0].decodescript( 'a9' + push_public_key_hash + '87') assert_equal( 'OP_HASH160 ' + public_key_hash + ' OP_EQUAL', rpc_result['asm']) # 5) null data scriptPubKey # use a signature look-alike here to make sure that we do not decode random data as a signature. # this matters if/when signature sighash decoding comes along. # would want to make sure that no such decoding takes place in this # case. signature_imposter = '48304502207fa7a6d1e0ee81132a269ad84e68d695483745cde8b541e3bf630749894e342a022100c1f7ab20e13e22fb95281a870f3dcf38d782e53023ee313d741ad0cfbc0c509001' # OP_RETURN rpc_result = self.nodes[0].decodescript('6a' + signature_imposter) assert_equal('OP_RETURN ' + signature_imposter[2:], rpc_result['asm']) # 6) a CLTV redeem script. redeem scripts are in-effect scriptPubKey scripts, so adding a test here. # OP_NOP2 is also known as OP_CHECKLOCKTIMEVERIFY. # just imagine that the pub keys used below are different. # for our purposes here it does not matter that they are the same even though it is unrealistic. # # OP_IF # OP_CHECKSIGVERIFY # OP_ELSE # OP_CHECKLOCKTIMEVERIFY OP_DROP # OP_ENDIF # OP_CHECKSIG # # lock until block 500,000 rpc_result = self.nodes[0].decodescript( '63' + push_public_key + 'ad670320a107b17568' + push_public_key + 'ac') assert_equal('OP_IF ' + public_key + ' OP_CHECKSIGVERIFY OP_ELSE 500000 OP_CHECKLOCKTIMEVERIFY OP_DROP OP_ENDIF ' + public_key + ' OP_CHECKSIG', rpc_result['asm']) def decoderawtransaction_asm_sighashtype(self): """Tests decoding scripts via RPC command "decoderawtransaction". This test is in with the "decodescript" tests because they are testing the same "asm" script decodes. """ # this test case uses a random plain vanilla mainnet transaction with a # single P2PKH input and output tx = '0100000001696a20784a2c70143f634e95227dbdfdf0ecd51647052e70854512235f5986ca010000008a47304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb014104d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536ffffffff0100e1f505000000001976a914eb6c6e0cdb2d256a32d97b8df1fc75d1920d9bca88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal( '304402207174775824bec6c2700023309a168231ec80b82c6069282f5133e6f11cbb04460220570edc55c7c5da2ca687ebd0372d3546ebc3f810516a002350cac72dfe192dfb[ALL] 04d3f898e6487787910a690410b7a917ef198905c27fb9d3b0a42da12aceae0544fc7088d239d9a48f2828a15a09e84043001f27cc80d162cb95404e1210161536', rpc_result['vin'][0]['scriptSig']['asm']) # this test case uses a mainnet transaction that has a P2SH input and both P2PKH and P2SH outputs. # it's from James D'Angelo's awesome introductory videos about multisig: https://www.youtube.com/watch?v=zIbUSaZBJgU and https://www.youtube.com/watch?v=OSA1pwlaypc # verify that we have not altered scriptPubKey decoding. 
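# Editor's note (worked example): byte-level breakdown of the CLTV redeem
# script decoded above, '63' <pubkey> 'ad' '67' '0320a107' 'b1' '75' '68'
# <pubkey> 'ac':
#   63 = OP_IF, ad = OP_CHECKSIGVERIFY, 67 = OP_ELSE,
#   03 20a107 = push 3 bytes (little-endian 0x07a120 = 500000),
#   b1 = OP_CHECKLOCKTIMEVERIFY (OP_NOP2), 75 = OP_DROP, 68 = OP_ENDIF,
#   ac = OP_CHECKSIG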
tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914dc863734a218bfe83ef770ee9d41a27f824a6e5688acee2a02000000000017a9142a5edea39971049a540474c6a99edf0aa4074c588700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal( '8e3730608c3b0bb5df54f09076e196bc292a8e39a78e73b44b6ba08c78f5cbb0', rpc_result['txid']) assert_equal( '0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm']) assert_equal( 'OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal( 'OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) txSave = CTransaction() txSave.deserialize(BytesIO(hex_str_to_bytes(tx))) # make sure that a specifically crafted op_return value will not pass # all the IsDERSignature checks and then get decoded as a sighash type tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal('OP_RETURN 300602010002010001', rpc_result['vout'][0]['scriptPubKey']['asm']) # verify that we have not altered scriptPubKey processing even of a # specially crafted P2PKH pubkeyhash and P2SH redeem script hash that # is made to pass the der signature checks tx = '01000000018d1f5635abd06e2c7e2ddf58dc85b3de111e4ad6e0ab51bb0dcf5e84126d927300000000fdfe0000483045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea01483045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75014c695221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53aeffffffff02611e0000000000001976a914301102070101010101010102060101010101010188acee2a02000000000017a91430110207010101010101010206010101010101018700000000' rpc_result = self.nodes[0].decoderawtransaction(tx) assert_equal( 'OP_DUP 
OP_HASH160 3011020701010101010101020601010101010101 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal( 'OP_HASH160 3011020701010101010101020601010101010101 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) # some more full transaction tests of varying specific scriptSigs. used instead of # tests in decodescript_script_sig because the decodescript RPC is specifically # for working on scriptPubKeys (argh!). push_signature = bytes_to_hex_str( txSave.vin[0].scriptSig)[2:(0x48 * 2 + 4)] signature = push_signature[2:] der_signature = signature[:-2] signature_sighash_decoded = der_signature + '[ALL]' signature_2 = der_signature + '82' push_signature_2 = '48' + signature_2 signature_2_sighash_decoded = der_signature + '[NONE|ANYONECANPAY]' # 1) P2PK scriptSig txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature) rpc_result = self.nodes[0].decoderawtransaction( bytes_to_hex_str(txSave.serialize())) assert_equal(signature_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # make sure that the sighash decodes come out correctly for a more # complex / lesser used case. txSave.vin[0].scriptSig = hex_str_to_bytes(push_signature_2) rpc_result = self.nodes[0].decoderawtransaction( bytes_to_hex_str(txSave.serialize())) assert_equal(signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 2) multisig scriptSig txSave.vin[0].scriptSig = hex_str_to_bytes( '00' + push_signature + push_signature_2) rpc_result = self.nodes[0].decoderawtransaction( bytes_to_hex_str(txSave.serialize())) assert_equal('0 ' + signature_sighash_decoded + ' ' + signature_2_sighash_decoded, rpc_result['vin'][0]['scriptSig']['asm']) # 3) test a scriptSig that contains more than push operations. # in fact, it contains an OP_RETURN with data specially crafted to # cause improper decode if the code does not catch it. txSave.vin[0].scriptSig = hex_str_to_bytes( '6a143011020701010101010101020601010101010101') rpc_result = self.nodes[0].decoderawtransaction( bytes_to_hex_str(txSave.serialize())) assert_equal('OP_RETURN 3011020701010101010101020601010101010101', rpc_result['vin'][0]['scriptSig']['asm']) def run_test(self): self.decodescript_script_sig() self.decodescript_script_pub_key() self.decoderawtransaction_asm_sighashtype() if __name__ == '__main__': DecodeScriptTest().main() diff --git a/qa/rpc-tests/disablewallet.py b/qa/rpc-tests/disablewallet.py index b4bf4e3e3..19b162e36 100755 --- a/qa/rpc-tests/disablewallet.py +++ b/qa/rpc-tests/disablewallet.py @@ -1,53 +1,48 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Exercise API with -disablewallet. 
# from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class DisableWalletTest (BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 - - def setup_network(self, split=False): - self.nodes = start_nodes( - self.num_nodes, self.options.tmpdir, [['-disablewallet']]) - self.is_network_split = False - self.sync_all() + self.extra_args = [["-disablewallet"]] def run_test(self): # Check regression: # https://github.com/bitcoin/bitcoin/issues/6963#issuecomment-154548880 x = self.nodes[0].validateaddress('3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy') assert(x['isvalid'] == False) x = self.nodes[0].validateaddress('mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ') assert(x['isvalid'] == True) # Checking mining to an address without a wallet try: self.nodes[0].generatetoaddress( 1, 'mneYUmWYsuk7kySiURxCi3AGxrAqZxLgPZ') except JSONRPCException as e: assert("Invalid address" not in e.error['message']) assert( "ProcessNewBlock, block not accepted" not in e.error['message']) assert("Couldn't create new block" not in e.error['message']) try: self.nodes[0].generatetoaddress( 1, '3J98t1WpEZ73CNmQviecrnyiWrnqRhWNLy') raise AssertionError("Must not mine to invalid address!") except JSONRPCException as e: assert("Invalid address" in e.error['message']) if __name__ == '__main__': DisableWalletTest().main() diff --git a/qa/rpc-tests/disconnect_ban.py b/qa/rpc-tests/disconnect_ban.py index f2ec97835..5b6d71fa6 100755 --- a/qa/rpc-tests/disconnect_ban.py +++ b/qa/rpc-tests/disconnect_ban.py @@ -1,131 +1,127 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test node disconnect and ban behavior""" from test_framework.mininode import wait_until from test_framework.test_framework import BitcoinTestFramework from test_framework.util import (assert_equal, assert_raises_jsonrpc, connect_nodes_bi, start_node, stop_node, ) class DisconnectBanTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = False - def setup_network(self): - self.nodes = self.setup_nodes() - connect_nodes_bi(self.nodes, 0, 1) - def run_test(self): self.log.info("Test setban and listbanned RPCs") self.log.info("setban: successfully ban single IP address") # node1 should have 2 connections to node0 at this point assert_equal(len(self.nodes[1].getpeerinfo()), 2) self.nodes[1].setban("127.0.0.1", "add") wait_until(lambda: len(self.nodes[1].getpeerinfo()) == 0) # all nodes must be disconnected at this point assert_equal(len(self.nodes[1].getpeerinfo()), 0) assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("clearbanned: successfully clear ban list") self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].setban("127.0.0.0/24", "add") self.log.info("setban: fail to ban an already banned subnet") assert_equal(len(self.nodes[1].listbanned()), 1) assert_raises_jsonrpc( -23, "IP/Subnet already banned", self.nodes[1].setban, "127.0.0.1", "add") self.log.info("setban: fail to ban an invalid subnet") assert_raises_jsonrpc( -30, "Error: Invalid IP/Subnet", self.nodes[1].setban, "127.0.0.1/42", "add") # still only one banned ip because 127.0.0.1 is within the range of # 127.0.0.0/24 assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: fail to unban a non-banned subnet") assert_raises_jsonrpc( -30, "Error: Unban failed", self.nodes[1].setban, "127.0.0.1", "remove") assert_equal(len(self.nodes[1].listbanned()), 1) self.log.info("setban remove: successfully unban subnet") self.nodes[1].setban("127.0.0.0/24", "remove") assert_equal(len(self.nodes[1].listbanned()), 0) self.nodes[1].clearbanned() assert_equal(len(self.nodes[1].listbanned()), 0) self.log.info("setban: test persistence across node restart") self.nodes[1].setban("127.0.0.0/32", "add") self.nodes[1].setban("127.0.0.0/24", "add") # ban for 1 seconds self.nodes[1].setban("192.168.0.1", "add", 1) # ban for 1000 seconds self.nodes[1].setban( "2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) listBeforeShutdown = self.nodes[1].listbanned() assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) wait_until(lambda: len(self.nodes[1].listbanned()) == 3) stop_node(self.nodes[1], 1) self.nodes[1] = start_node(1, self.options.tmpdir) listAfterShutdown = self.nodes[1].listbanned() assert_equal("127.0.0.0/24", listAfterShutdown[0]['address']) assert_equal("127.0.0.0/32", listAfterShutdown[1]['address']) assert_equal("/19" in listAfterShutdown[2]['address'], True) # Clear ban lists self.nodes[1].clearbanned() connect_nodes_bi(self.nodes, 0, 1) self.log.info("Test disconnectnode RPCs") self.log.info( "disconnectnode: fail to disconnect when calling with address and nodeid") address1 = self.nodes[0].getpeerinfo()[0]['addr'] node1 = self.nodes[0].getpeerinfo()[0]['addr'] assert_raises_jsonrpc( -32602, "Only one of address and nodeid should be provided.", self.nodes[0].disconnectnode, address=address1, nodeid=node1) self.log.info( "disconnectnode: fail to disconnect when calling with junk address") assert_raises_jsonrpc(-29, "Node not found in connected nodes", self.nodes[0].disconnectnode, address="221B 
Baker Street") self.log.info( "disconnectnode: successfully disconnect node by address") address1 = self.nodes[0].getpeerinfo()[0]['addr'] self.nodes[0].disconnectnode(address=address1) wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1) assert not [node for node in self.nodes[0] .getpeerinfo() if node['addr'] == address1] self.log.info("disconnectnode: successfully reconnect node") # reconnect the node connect_nodes_bi(self.nodes, 0, 1) assert_equal(len(self.nodes[0].getpeerinfo()), 2) assert [node for node in self.nodes[0] .getpeerinfo() if node['addr'] == address1] self.log.info( "disconnectnode: successfully disconnect node by node id") id1 = self.nodes[0].getpeerinfo()[0]['id'] self.nodes[0].disconnectnode(nodeid=id1) wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 1) assert not [node for node in self.nodes[ 0].getpeerinfo() if node['id'] == id1] if __name__ == '__main__': DisconnectBanTest().main() diff --git a/qa/rpc-tests/forknotify.py b/qa/rpc-tests/forknotify.py index c053dbdd4..3a17236b8 100755 --- a/qa/rpc-tests/forknotify.py +++ b/qa/rpc-tests/forknotify.py @@ -1,69 +1,66 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test -alertnotify # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class ForkNotifyTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = False - alert_filename = None # Set by setup_network - def setup_network(self): self.nodes = [] self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") with open(self.alert_filename, 'w', encoding='utf8'): pass # Just open then close to create zero-length file self.nodes.append(start_node(0, self.options.tmpdir, ["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])) # Node1 mines block.version=211 blocks self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=211"])) connect_nodes(self.nodes[1], 0) - self.is_network_split = False self.sync_all() def run_test(self): # Mine 51 up-version blocks self.nodes[1].generate(51) self.sync_all() # -alertnotify should trigger on the 51'st, # but mine and sync another to give # -alertnotify time to write self.nodes[1].generate(1) self.sync_all() with open(self.alert_filename, 'r', encoding='utf8') as f: alert_text = f.read() if len(alert_text) == 0: raise AssertionError( "-alertnotify did not warn of up-version blocks") # Mine more up-version blocks, should not get more alerts: self.nodes[1].generate(1) self.sync_all() self.nodes[1].generate(1) self.sync_all() with open(self.alert_filename, 'r', encoding='utf8') as f: alert_text2 = f.read() if alert_text != alert_text2: raise AssertionError( "-alertnotify excessive warning of up-version blocks") if __name__ == '__main__': ForkNotifyTest().main() diff --git a/qa/rpc-tests/fundrawtransaction.py b/qa/rpc-tests/fundrawtransaction.py index fdd596860..2b1c72765 100755 --- a/qa/rpc-tests/fundrawtransaction.py +++ b/qa/rpc-tests/fundrawtransaction.py @@ -1,778 +1,774 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * def get_unspent(listunspent, amount): for utx in listunspent: if utx['amount'] == amount: return utx raise AssertionError( 'Could not find unspent with amount={}'.format(amount)) class RawTransactionsTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 4 def setup_network(self, split=False): - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) + self.setup_nodes() connect_nodes_bi(self.nodes, 0, 1) connect_nodes_bi(self.nodes, 1, 2) connect_nodes_bi(self.nodes, 0, 2) connect_nodes_bi(self.nodes, 0, 3) - self.is_network_split = False - self.sync_all() - def run_test(self): min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee'] # This test is not meant to test fee estimation and we'd like # to be sure all txs are sent at a consistent desired feerate for node in self.nodes: node.settxfee(min_relay_tx_fee) # If the fee's positive delta is higher than this value, the tests will fail; # a negative delta always fails the tests. # The size of the signature of every input may be at most 2 bytes larger # than a minimum sized signature. # = 2 bytes * minRelayTxFeePerByte feeTolerance = 2 * min_relay_tx_fee / 1000 self.nodes[2].generate(1) self.sync_all() self.nodes[0].generate(121) self.sync_all() watchonly_address = self.nodes[0].getnewaddress() watchonly_pubkey = self.nodes[ 0].validateaddress(watchonly_address)["pubkey"] watchonly_amount = Decimal(200) self.nodes[3].importpubkey(watchonly_pubkey, "", True) watchonly_txid = self.nodes[0].sendtoaddress( watchonly_address, watchonly_amount) self.nodes[0].sendtoaddress( self.nodes[3].getnewaddress(), watchonly_amount / 10) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) self.nodes[0].generate(1) self.sync_all() # # simple test # # inputs = [] outputs = {self.nodes[0].getnewaddress(): 1.0} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert(len(dec_tx['vin']) > 0) # test that we have enough inputs # # simple test with two coins # # inputs = [] outputs = {self.nodes[0].getnewaddress(): 2.2} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert(len(dec_tx['vin']) > 0) # test if we have enough inputs # # simple test with two coins # # inputs = [] outputs = {self.nodes[0].getnewaddress(): 2.6} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert(len(dec_tx['vin']) > 0) assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') # # simple test with two outputs # # inputs = [] outputs = { self.nodes[0].getnewaddress(): 2.6, self.nodes[1].getnewaddress(): 2.5} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx =
self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert(len(dec_tx['vin']) > 0) assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '') # # test a fundrawtransaction with a VIN greater than the required amount # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): 1.0} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] # compare vin total and totalout+fee assert_equal(fee + totalOut, utx['amount']) # # test a fundrawtransaction which will not get a change output # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = { self.nodes[0].getnewaddress(): Decimal(5.0) - fee - feeTolerance} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 for out in dec_tx['vout']: totalOut += out['value'] assert_equal(rawtxfund['changepos'], -1) assert_equal(fee + totalOut, utx['amount']) # compare vin total and totalout+fee # # test a fundrawtransaction with an invalid option # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[ 2].fundrawtransaction, rawtx, {'foo': 'bar'}) # # test a fundrawtransaction with an invalid change address # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_raises_jsonrpc( -5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress': 'foobar'}) # # test a fundrawtransaction with a provided change address # # utx = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): Decimal(4.0)} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) change = self.nodes[2].getnewaddress() assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[ 2].fundrawtransaction, rawtx, {'changeAddress': change, 'changePosition': 2}) rawtxfund = self.nodes[2].fundrawtransaction( rawtx, {'changeAddress': change, 'changePosition': 0}) dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) out = dec_tx['vout'][0] assert_equal(change, out['scriptPubKey']['addresses'][0]) # # test a fundrawtransaction with a VIN smaller than the required amount # # utx =
get_unspent(self.nodes[2].listunspent(), 1) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}] outputs = {self.nodes[0].getnewaddress(): 1.0} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) # 4-byte version + 1-byte vin count + 36-byte prevout then script_len rawtx = rawtx[:82] + "0100" + rawtx[84:] dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for i, out in enumerate(dec_tx['vout']): totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts += 1 else: assert_equal(i, rawtxfund['changepos']) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex']) assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) # # test a fundrawtransaction with two VINs # # utx = get_unspent(self.nodes[2].listunspent(), 1) utx2 = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}, {'txid': utx2['txid'], 'vout': utx2['vout']}] outputs = {self.nodes[0].getnewaddress(): 6.0} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts += 1 assert_equal(matchingOuts, 1) assert_equal(len(dec_tx['vout']), 2) matchingIns = 0 for vinOut in dec_tx['vin']: for vinIn in inputs: if vinIn['txid'] == vinOut['txid']: matchingIns += 1 # we now must see two vins identical to vins given as params assert_equal(matchingIns, 2) # # test a fundrawtransaction with two VINs and two vOUTs # # utx = get_unspent(self.nodes[2].listunspent(), 1) utx2 = get_unspent(self.nodes[2].listunspent(), 5) inputs = [{'txid': utx['txid'], 'vout': utx['vout']}, {'txid': utx2['txid'], 'vout': utx2['vout']}] outputs = { self.nodes[0].getnewaddress(): 6.0, self.nodes[0].getnewaddress(): 1.0} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(utx['txid'], dec_tx['vin'][0]['txid']) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) fee = rawtxfund['fee'] dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) totalOut = 0 matchingOuts = 0 for out in dec_tx['vout']: totalOut += out['value'] if out['scriptPubKey']['addresses'][0] in outputs: matchingOuts += 1 assert_equal(matchingOuts, 2) assert_equal(len(dec_tx['vout']), 3) # # test a fundrawtransaction with invalid vin # # listunspent = self.nodes[2].listunspent() inputs = [ {'txid': "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout': 0}] # invalid vin! 
outputs = {self.nodes[0].getnewaddress(): 1.0} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_raises_jsonrpc( -4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx) # # compare fee of a standard pubkeyhash transaction inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance) # # # compare fee of a standard pubkeyhash transaction with multiple # outputs inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.1, self.nodes[1].getnewaddress(): 1.2, self.nodes[1].getnewaddress(): 0.1, self.nodes[ 1].getnewaddress(): 1.3, self.nodes[1].getnewaddress(): 0.2, self.nodes[1].getnewaddress(): 0.3} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[0].sendmany("", outputs) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance) # # # compare fee of a 2of2 multisig p2sh transaction # create 2of2 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].validateaddress(addr1) addr2Obj = self.nodes[1].validateaddress(addr2) mSigObj = self.nodes[1].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) inputs = [] outputs = {mSigObj: 1.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance) # # # compare fee of a 4of5 multisig p2sh transaction # create 4of5 addr addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[1].getnewaddress() addr3 = self.nodes[1].getnewaddress() addr4 = self.nodes[1].getnewaddress() addr5 = self.nodes[1].getnewaddress() addr1Obj = self.nodes[1].validateaddress(addr1) addr2Obj = self.nodes[1].validateaddress(addr2) addr3Obj = self.nodes[1].validateaddress(addr3) addr4Obj = self.nodes[1].validateaddress(addr4) addr5Obj = self.nodes[1].validateaddress(addr5) mSigObj = self.nodes[1].addmultisigaddress( 4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']]) inputs = [] outputs = {mSigObj: 1.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) fundedTx = self.nodes[0].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[0].sendtoaddress(mSigObj, 1.1) signedFee = self.nodes[0].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance) # # # spend a 2of2 multisig transaction over fundraw # create 2of2 addr addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].validateaddress(addr1) addr2Obj = self.nodes[2].validateaddress(addr2) mSigObj =
self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # send 1.2 BTC to msig addr txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) self.sync_all() self.nodes[1].generate(1) self.sync_all() oldBalance = self.nodes[1].getbalance() inputs = [] outputs = {self.nodes[1].getnewaddress(): 1.1} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) fundedTx = self.nodes[2].fundrawtransaction(rawTx) signedTx = self.nodes[2].signrawtransaction( fundedTx['hex'], None, None, "ALL|FORKID") txId = self.nodes[2].sendrawtransaction(signedTx['hex']) self.sync_all() self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node1 assert_equal( oldBalance + Decimal('1.10000000'), self.nodes[1].getbalance()) # # locked wallet test self.nodes[1].encryptwallet("test") self.nodes.pop(1) stop_node(self.nodes[0], 0) stop_node(self.nodes[1], 2) stop_node(self.nodes[2], 3) self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) # This test is not meant to test fee estimation and we'd like # to be sure all txs are sent at a consistent desired feerate for node in self.nodes: node.settxfee(min_relay_tx_fee) connect_nodes_bi(self.nodes, 0, 1) connect_nodes_bi(self.nodes, 1, 2) connect_nodes_bi(self.nodes, 0, 2) connect_nodes_bi(self.nodes, 0, 3) - self.is_network_split = False self.sync_all() # drain the keypool self.nodes[1].getnewaddress() inputs = [] outputs = {self.nodes[0].getnewaddress(): 1.1} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) # fund a transaction that requires a new key for the change output # creating the key must be impossible because the wallet is locked assert_raises_jsonrpc( -4, "Insufficient funds", self.nodes[1].fundrawtransaction, rawTx) # refill the keypool self.nodes[1].walletpassphrase("test", 100) self.nodes[1].walletlock() assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[ 1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2) oldBalance = self.nodes[0].getbalance() inputs = [] outputs = {self.nodes[0].getnewaddress(): 1.1} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) # now we need to unlock self.nodes[1].walletpassphrase("test", 600) signedTx = self.nodes[1].signrawtransaction( fundedTx['hex'], None, None, "ALL|FORKID") txId = self.nodes[1].sendrawtransaction(signedTx['hex']) self.nodes[1].generate(1) self.sync_all() # make sure funds are received at node0 assert_equal( oldBalance + Decimal('51.10000000'), self.nodes[0].getbalance()) # # multiple (~19) inputs tx test | Compare fee # # # empty node1, send some small coins from node0 to node1 self.nodes[1].sendtoaddress( self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0, 20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() # fund a tx with ~20 small inputs inputs = [] outputs = { self.nodes[0].getnewaddress(): 0.15, self.nodes[0].getnewaddress(): 0.04} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) # create same transaction over sendtoaddress txId = self.nodes[1].sendmany("", outputs) signedFee = self.nodes[1].getrawmempool(True)[txId]['fee'] # compare fee feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee) assert(feeDelta >= 0 and feeDelta <= feeTolerance * 19) # ~19 inputs # # multiple (~19) inputs tx test | sign/send # # # again, empty node1, send some small coins from node0 to
node1 self.nodes[1].sendtoaddress( self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True) self.sync_all() self.nodes[0].generate(1) self.sync_all() for i in range(0, 20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() # fund a tx with ~20 small inputs oldBalance = self.nodes[0].getbalance() inputs = [] outputs = { self.nodes[0].getnewaddress(): 0.15, self.nodes[0].getnewaddress(): 0.04} rawTx = self.nodes[1].createrawtransaction(inputs, outputs) fundedTx = self.nodes[1].fundrawtransaction(rawTx) fundedAndSignedTx = self.nodes[1].signrawtransaction( fundedTx['hex'], None, None, "ALL|FORKID") txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(oldBalance + Decimal('50.19000000'), self.nodes[0].getbalance()) # 0.19+block reward # # test fundrawtransaction with OP_RETURN and no vin # # rawtx = "0100000000010000000000000000066a047465737400000000" dec_tx = self.nodes[2].decoderawtransaction(rawtx) assert_equal(len(dec_tx['vin']), 0) assert_equal(len(dec_tx['vout']), 1) rawtxfund = self.nodes[2].fundrawtransaction(rawtx) dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex']) assert_greater_than(len(dec_tx['vin']), 0) # at least one vin assert_equal(len(dec_tx['vout']), 2) # one change output added # # test a fundrawtransaction using only watchonly # # inputs = [] outputs = {self.nodes[2].getnewaddress(): watchonly_amount / 2} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction( rawtx, {'includeWatching': True}) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 1) assert_equal(res_dec["vin"][0]["txid"], watchonly_txid) assert("fee" in result.keys()) assert_greater_than(result["changepos"], -1) # # test fundrawtransaction using the entirety of watched funds # # inputs = [] outputs = {self.nodes[2].getnewaddress(): watchonly_amount} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) # Backward compatibility test (2nd param is includeWatching) result = self.nodes[3].fundrawtransaction(rawtx, True) res_dec = self.nodes[0].decoderawtransaction(result["hex"]) assert_equal(len(res_dec["vin"]), 2) assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec[ "vin"][1]["txid"] == watchonly_txid) assert_greater_than(result["fee"], 0) assert_greater_than(result["changepos"], -1) assert_equal(result["fee"] + res_dec["vout"][ result["changepos"]]["value"], watchonly_amount / 10) signedtx = self.nodes[3].signrawtransaction( result["hex"], None, None, "ALL|FORKID") assert(not signedtx["complete"]) signedtx = self.nodes[0].signrawtransaction( signedtx["hex"], None, None, "ALL|FORKID") assert(signedtx["complete"]) self.nodes[0].sendrawtransaction(signedtx["hex"]) self.nodes[0].generate(1) self.sync_all() # # Test feeRate option # # # Make sure there is exactly one input so coin selection can't skew the # result assert_equal(len(self.nodes[3].listunspent(1)), 1) inputs = [] outputs = {self.nodes[3].getnewaddress(): 1} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = self.nodes[3].fundrawtransaction( rawtx) # uses min_relay_tx_fee (set by settxfee) result2 = self.nodes[3].fundrawtransaction( rawtx, {"feeRate": 2 * min_relay_tx_fee}) result3 = self.nodes[3].fundrawtransaction( rawtx, {"feeRate": 10 * min_relay_tx_fee}) result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex']) assert_fee_amount( result2['fee'], 
count_bytes(result2['hex']), 2 * result_fee_rate) assert_fee_amount( result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate) # # Test address reuse option # # result3 = self.nodes[3].fundrawtransaction( rawtx, {"reserveChangeKey": False}) res_dec = self.nodes[0].decoderawtransaction(result3["hex"]) changeaddress = "" for out in res_dec['vout']: if out['value'] > 1.0: changeaddress += out['scriptPubKey']['addresses'][0] assert(changeaddress != "") nextaddr = self.nodes[3].getnewaddress() # frt should not have removed the key from the keypool assert(changeaddress == nextaddr) result3 = self.nodes[3].fundrawtransaction(rawtx) res_dec = self.nodes[0].decoderawtransaction(result3["hex"]) changeaddress = "" for out in res_dec['vout']: if out['value'] > 1.0: changeaddress += out['scriptPubKey']['addresses'][0] assert(changeaddress != "") nextaddr = self.nodes[3].getnewaddress() # Now the change address key should be removed from the keypool assert(changeaddress != nextaddr) # # Test subtractFeeFromOutputs option # # # Make sure there is exactly one input so coin selection can't skew the # result assert_equal(len(self.nodes[3].listunspent(1)), 1) inputs = [] outputs = {self.nodes[2].getnewaddress(): 1} rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee) self.nodes[3].fundrawtransaction( rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list self.nodes[3].fundrawtransaction( rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee) self.nodes[3].fundrawtransaction( rawtx, {"feeRate": 2 * min_relay_tx_fee}), self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * min_relay_tx_fee, "subtractFeeFromOutputs": [0]})] dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result] output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)] change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)] assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee']) assert_equal(result[3]['fee'], result[4]['fee']) assert_equal(change[0], change[1]) assert_equal(output[0], output[1]) assert_equal(output[0], output[2] + result[2]['fee']) assert_equal(change[0] + result[0]['fee'], change[2]) assert_equal(output[3], output[4] + result[4]['fee']) assert_equal(change[3] + result[3]['fee'], change[4]) inputs = [] outputs = { self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)} keys = list(outputs.keys()) rawtx = self.nodes[3].createrawtransaction(inputs, outputs) result = [self.nodes[3].fundrawtransaction(rawtx), # split the fee between outputs 0, 2, and 3, but not output 1 self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})] dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']), self.nodes[3].decoderawtransaction(result[1]['hex'])] # Nested list of non-change output amounts for each transaction output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']] for d, r in zip(dec_tx, result)] # List of differences in output amounts between normal and subtractFee # transactions share = [o0 - o1 for o0, o1 in zip(output[0], output[1])] # output 1 is the same in both transactions assert_equal(share[1], 0) # the other 3 outputs are smaller as a result of subtractFeeFromOutputs assert_greater_than(share[0], 0) assert_greater_than(share[2], 0) assert_greater_than(share[3], 0) # outputs 2 and 3 take the same share of the fee assert_equal(share[2], 
share[3]) # output 0 takes at least as much share of the fee, and no more than 2 # satoshis more, than outputs 2 and 3 assert_greater_than_or_equal(share[0], share[2]) assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0]) # the fee is the same in both transactions assert_equal(result[0]['fee'], result[1]['fee']) # the total subtracted from the outputs is equal to the fee assert_equal(share[0] + share[2] + share[3], result[0]['fee']) if __name__ == '__main__': RawTransactionsTest().main() diff --git a/qa/rpc-tests/getblocktemplate_proposals.py b/qa/rpc-tests/getblocktemplate_proposals.py index c12f37908..e7491bb22 100755 --- a/qa/rpc-tests/getblocktemplate_proposals.py +++ b/qa/rpc-tests/getblocktemplate_proposals.py @@ -1,181 +1,177 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from binascii import a2b_hex, b2a_hex from hashlib import sha256 from struct import pack def b2x(b): return b2a_hex(b).decode('ascii') # NOTE: This does not work for signed numbers (set the high bit) or zero # (use b'\0') def encodeUNum(n): s = bytearray(b'\1') while n > 127: s[0] += 1 s.append(n % 256) n //= 256 s.append(n) return bytes(s) def varlenEncode(n): if n < 0xfd: return pack('<B', n) if n <= 0xffff: return b'\xfd' + pack('<H', n) if n <= 0xffffffff: return b'\xfe' + pack('<L', n) return b'\xff' + pack('<Q', n) def dblsha(b): return sha256(sha256(b).digest()).digest() def genmrklroot(leaflist): cur = leaflist while len(cur) > 1: n = [] if len(cur) & 1: cur.append(cur[-1]) for i in range(0, len(cur), 2): n.append(dblsha(cur[i] + cur[i + 1])) cur = n return cur[0] def template_to_bytearray(tmpl, txlist): blkver = pack('<L', tmpl['version']) [...] '''Test behavior of -maxuploadtarget. * Verify that getdata requests for old blocks (>1week) are dropped if uploadtarget has been reached. * Verify that getdata requests for recent blocks are respected even if uploadtarget has been reached. * Verify that the upload counters are reset after 24 hours. ''' # TestNode: bare-bones "peer". Used mostly as a conduit for a test to send # p2p messages to a node, generating the messages in the main testing logic. class TestNode(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.connection = None self.ping_counter = 1 self.last_pong = msg_pong() self.block_receive_map = {} def add_connection(self, conn): self.connection = conn self.peer_disconnected = False def on_inv(self, conn, message): pass # Track the last getdata message we receive (used in the test) def on_getdata(self, conn, message): self.last_getdata = message def on_block(self, conn, message): message.block.calc_sha256() try: self.block_receive_map[message.block.sha256] += 1 except KeyError as e: self.block_receive_map[message.block.sha256] = 1 # Spin until verack message is received from the node. # We use this to signal that our test can begin. This # is called from the testing thread, so it needs to acquire # the global lock.
def wait_for_verack(self): def veracked(): return self.verack_received return wait_until(veracked, timeout=10) def wait_for_disconnect(self): def disconnected(): return self.peer_disconnected return wait_until(disconnected, timeout=10) # Wrapper for the NodeConn's send_message function def send_message(self, message): self.connection.send_message(message) def on_pong(self, conn, message): self.last_pong = message def on_close(self, conn): self.peer_disconnected = True # Sync up with the node after delivery of a block def sync_with_ping(self, timeout=30): def received_pong(): return (self.last_pong.nonce == self.ping_counter) self.connection.send_message(msg_ping(nonce=self.ping_counter)) success = wait_until(received_pong, timeout=timeout) self.ping_counter += 1 return success class MaxUploadTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 + # Start a node with maxuploadtarget of 200 MB (/24h) + self.extra_args = [["-maxuploadtarget=200"]] # Cache for utxos, as the listunspent may take a long time later in the # test self.utxo_cache = [] - def setup_network(self): - # Start a node with maxuploadtarget of 200 MB (/24h) - self.nodes = [] - self.nodes.append( - start_node( - 0, self.options.tmpdir, ["-maxuploadtarget=200"])) - def run_test(self): # Before we connect anything, we first set the time on the node # to be in the past, otherwise things break because the CNode # time counters can't be reset backward after initialization old_time = int(time.time() - 2 * 60 * 60 * 24 * 7) self.nodes[0].setmocktime(old_time) # Generate some old blocks self.nodes[0].generate(130) # test_nodes[0] will only request old blocks # test_nodes[1] will only request new blocks # test_nodes[2] will test resetting the counters test_nodes = [] connections = [] for i in range(3): test_nodes.append(TestNode()) connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i])) test_nodes[i].add_connection(connections[i]) # Start up network handling in another thread NetworkThread().start() [x.wait_for_verack() for x in test_nodes] # Test logic begins here # Now mine a big block mine_large_block(self.nodes[0], self.utxo_cache) # Store the hash; we'll request this later big_old_block = self.nodes[0].getbestblockhash() old_block_size = self.nodes[0].getblock(big_old_block, True)['size'] big_old_block = int(big_old_block, 16) # Advance to two days ago self.nodes[0].setmocktime(int(time.time()) - 2 * 60 * 60 * 24) # Mine one more block, so that the prior block looks old mine_large_block(self.nodes[0], self.utxo_cache) # We'll be requesting this new block too big_new_block = self.nodes[0].getbestblockhash() big_new_block = int(big_new_block, 16) # test_nodes[0] will test what happens if we just keep requesting the # same big old block too many times (expect: disconnect) getdata_request = msg_getdata() getdata_request.inv.append(CInv(2, big_old_block)) max_bytes_per_day = 200 * 1024 * 1024 daily_buffer = 144 * LEGACY_MAX_BLOCK_SIZE max_bytes_available = max_bytes_per_day - daily_buffer success_count = max_bytes_available // old_block_size # 144MB will be reserved for relaying new blocks, so expect this to # succeed for ~70 tries.
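# Back-of-envelope for the ~70 figure above (illustrative numbers): the
# daily target is 200 * 1024 * 1024 = 209,715,200 bytes, the new-block
# buffer reserves 144 * 1,000,000 = 144,000,000 bytes, leaving
# 65,715,200 bytes, i.e. roughly 70 getdata replies at ~0.9 MB per block.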
for i in range(success_count): test_nodes[0].send_message(getdata_request) test_nodes[0].sync_with_ping() assert_equal(test_nodes[0].block_receive_map[big_old_block], i + 1) assert_equal(len(self.nodes[0].getpeerinfo()), 3) # At most a couple more tries should succeed (depending on how long # the test has been running so far). for i in range(3): test_nodes[0].send_message(getdata_request) test_nodes[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) self.log.info( "Peer 0 disconnected after downloading old block too many times") # Requesting the current block on test_nodes[1] should succeed indefinitely, # even when over the max upload target. # We'll try 200 times getdata_request.inv = [CInv(2, big_new_block)] for i in range(200): test_nodes[1].send_message(getdata_request) test_nodes[1].sync_with_ping() assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1) self.log.info("Peer 1 able to repeatedly download new block") # But if test_nodes[1] tries for an old block, it gets disconnected # too. getdata_request.inv = [CInv(2, big_old_block)] test_nodes[1].send_message(getdata_request) test_nodes[1].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 1) self.log.info("Peer 1 disconnected after trying to download old block") self.log.info("Advancing system time on node to clear counters...") # If we advance the time by 24 hours, then the counters should reset, # and test_nodes[2] should be able to retrieve the old block. self.nodes[0].setmocktime(int(time.time())) test_nodes[2].sync_with_ping() test_nodes[2].send_message(getdata_request) test_nodes[2].sync_with_ping() assert_equal(test_nodes[2].block_receive_map[big_old_block], 1) self.log.info("Peer 2 able to download old block") [c.disconnect_node() for c in connections] # stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1 self.log.info("Restarting nodes with -whitelist=127.0.0.1") stop_node(self.nodes[0], 0) self.nodes[0] = start_node(0, self.options.tmpdir, [ "-whitelist=127.0.0.1", "-maxuploadtarget=1"]) # recreate/reconnect 3 test nodes test_nodes = [] connections = [] for i in range(3): test_nodes.append(TestNode()) connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i])) test_nodes[i].add_connection(connections[i]) # Start up network handling in another thread NetworkThread().start() [x.wait_for_verack() for x in test_nodes] # retrieve 20 blocks which should be enough to break the 1MB limit getdata_request.inv = [CInv(2, big_new_block)] for i in range(20): test_nodes[1].send_message(getdata_request) test_nodes[1].sync_with_ping() assert_equal(test_nodes[1].block_receive_map[big_new_block], i + 1) getdata_request.inv = [CInv(2, big_old_block)] test_nodes[1].send_message(getdata_request) test_nodes[1].wait_for_disconnect() # node is still connected because of the whitelist assert_equal(len(self.nodes[0].getpeerinfo()), 3) self.log.info( "Peer 1 still connected after trying to download old block (whitelisted)") [c.disconnect_node() for c in connections] if __name__ == '__main__': MaxUploadTest().main() diff --git a/qa/rpc-tests/mempool_limit.py b/qa/rpc-tests/mempool_limit.py index 7f9dc32d3..0627e2335 100755 --- a/qa/rpc-tests/mempool_limit.py +++ b/qa/rpc-tests/mempool_limit.py @@ -1,61 +1,55 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
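The mempool_limit test below funds one transaction at the bare relay fee, then fills the 5 MB mempool with three batches of large transactions at climbing fees so the cheap transaction is evicted; a sketch of that fee ladder (relay-fee value assumed for illustration):

# Sketch of the fee ladder used below (assumed relay fee): the lone
# relay-fee transaction is the cheapest entry, so it is evicted first
# once the -maxmempool=5 (MB) limit is hit.
from decimal import Decimal

relayfee = Decimal("0.00001")
base_fee = relayfee * 100
batch_fees = [(i + 1) * base_fee for i in range(3)]  # one per batch of 30
assert batch_fees == [Decimal("0.001"), Decimal("0.002"), Decimal("0.003")]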
# Test mempool limiting and eviction together with the wallet from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class MempoolLimitTest(BitcoinTestFramework): - def setup_network(self): - self.nodes = [] - self.nodes.append( - start_node(0, self.options.tmpdir, - ["-maxmempool=5", "-spendzeroconfchange=0"])) - self.is_network_split = False - self.sync_all() - self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] - def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 - self.txouts = gen_return_txouts() + self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]] def run_test(self): + txouts = gen_return_txouts() + relayfee = self.nodes[0].getnetworkinfo()['relayfee'] + txids = [] - utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 91) + utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91) # create a mempool tx that will be evicted us0 = utxos.pop() inputs = [{"txid": us0["txid"], "vout": us0["vout"]}] outputs = {self.nodes[0].getnewaddress(): 0.0001} tx = self.nodes[0].createrawtransaction(inputs, outputs) # specifically fund this tx with low fee - self.nodes[0].settxfee(self.relayfee) + self.nodes[0].settxfee(relayfee) txF = self.nodes[0].fundrawtransaction(tx) # return to automatic fee selection self.nodes[0].settxfee(0) txFS = self.nodes[0].signrawtransaction( txF['hex'], None, None, "ALL|FORKID") txid = self.nodes[0].sendrawtransaction(txFS['hex']) relayfee = self.nodes[0].getnetworkinfo()['relayfee'] base_fee = relayfee * 100 for i in range(3): txids.append([]) txids[i] = create_lots_of_big_transactions( - self.nodes[0], self.txouts, utxos[30 * i:30 * i + 30], 30, (i + 1) * base_fee) + self.nodes[0], txouts, utxos[30 * i:30 * i + 30], 30, (i + 1) * base_fee) # by now, the tx should be evicted, check confirmation state assert(txid not in self.nodes[0].getrawmempool()) txdata = self.nodes[0].gettransaction(txid) assert(txdata['confirmations'] == 0) # confirmation should still be 0 if __name__ == '__main__': MempoolLimitTest().main() diff --git a/qa/rpc-tests/mempool_packages.py b/qa/rpc-tests/mempool_packages.py index c0b67ec2a..893ac2a1c 100755 --- a/qa/rpc-tests/mempool_packages.py +++ b/qa/rpc-tests/mempool_packages.py @@ -1,280 +1,271 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
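One detail worth noting in the mempool_packages test below: getrawmempool reports 'descendantfees' and 'ancestorfees' in satoshis, so the test scales its BTC fee sums by COIN. A worked example with assumed fees:

# Worked example of the satoshi scaling checked below (assumed fees).
from decimal import Decimal

COIN = 100000000                      # satoshis per BTC, as in mininode
chain_fees = [Decimal("0.0001")] * 3  # three package members
descendant_fees = sum(chain_fees) * COIN
assert descendant_fees == 30000       # 0.0003 BTC expressed in satoshis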
# Test descendant package tracking code from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import COIN MAX_ANCESTORS = 25 MAX_DESCENDANTS = 25 class MempoolPackagesTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = False - - def setup_network(self): - self.nodes = [] - self.nodes.append( - start_node(0, self.options.tmpdir, ["-maxorphantx=1000"])) - self.nodes.append( - start_node(1, self.options.tmpdir, ["-maxorphantx=1000", - "-limitancestorcount=5"])) - connect_nodes(self.nodes[0], 1) - self.is_network_split = False - self.sync_all() + self.extra_args = [["-maxorphantx=1000"], [ + "-maxorphantx=1000", "-limitancestorcount=5"]] # Build a transaction that spends parent_txid:vout # Return amount sent def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee) / num_outputs) inputs = [{'txid': parent_txid, 'vout': vout}] outputs = {} for i in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransaction(rawtx, None, None, "ALL|FORKID") txid = node.sendrawtransaction(signedtx['hex']) fulltx = node.getrawtransaction(txid, 1) # make sure we didn't generate a change output assert(len(fulltx['vout']) == num_outputs) return (txid, send_value) def run_test(self): ''' Mine some blocks and have them mature. ''' self.nodes[0].generate(101) utxo = self.nodes[0].listunspent(10) txid = utxo[0]['txid'] vout = utxo[0]['vout'] value = utxo[0]['amount'] fee = Decimal("0.0001") # MAX_ANCESTORS transactions off a confirmed tx should be fine chain = [] for i in range(MAX_ANCESTORS): (txid, sent_value) = self.chain_transaction( self.nodes[0], txid, 0, value, fee, 1) value = sent_value chain.append(txid) # Check mempool has MAX_ANCESTORS transactions in it, and descendant # count and fees should look correct mempool = self.nodes[0].getrawmempool(True) assert_equal(len(mempool), MAX_ANCESTORS) descendant_count = 1 descendant_fees = 0 descendant_size = 0 descendants = [] ancestors = list(chain) for x in reversed(chain): # Check that getmempoolentry is consistent with getrawmempool entry = self.nodes[0].getmempoolentry(x) assert_equal(entry, mempool[x]) # Check that the descendant calculations are correct assert_equal(mempool[x]['descendantcount'], descendant_count) descendant_fees += mempool[x]['fee'] assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']) assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN) descendant_size += mempool[x]['size'] assert_equal(mempool[x]['descendantsize'], descendant_size) descendant_count += 1 # Check that getmempooldescendants is correct assert_equal(sorted(descendants), sorted( self.nodes[0].getmempooldescendants(x))) descendants.append(x) # Check that getmempoolancestors is correct ancestors.remove(x) assert_equal(sorted(ancestors), sorted( self.nodes[0].getmempoolancestors(x))) # Check that getmempoolancestors/getmempooldescendants correctly handle # verbose=true v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True) assert_equal(len(v_ancestors), len(chain) - 1) for x in v_ancestors.keys(): assert_equal(mempool[x], v_ancestors[x]) assert(chain[-1] not in v_ancestors.keys()) v_descendants = self.nodes[0].getmempooldescendants(chain[0], True) assert_equal(len(v_descendants), len(chain) - 1) for x in v_descendants.keys(): assert_equal(mempool[x], v_descendants[x]) 
assert(chain[0] not in v_descendants.keys()) # Check that ancestor modified fees includes fee deltas from # prioritisetransaction self.nodes[0].prioritisetransaction(chain[0], 0, 1000) mempool = self.nodes[0].getrawmempool(True) ancestor_fees = 0 for x in chain: ancestor_fees += mempool[x]['fee'] assert_equal( mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000) # Undo the prioritisetransaction for later tests self.nodes[0].prioritisetransaction(chain[0], 0, -1000) # Check that descendant modified fees includes fee deltas from # prioritisetransaction self.nodes[0].prioritisetransaction(chain[-1], 0, 1000) mempool = self.nodes[0].getrawmempool(True) descendant_fees = 0 for x in reversed(chain): descendant_fees += mempool[x]['fee'] assert_equal( mempool[x]['descendantfees'], descendant_fees * COIN + 1000) # Adding one more transaction on to the chain should fail. try: self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1) except JSONRPCException as e: self.log.info("too-long-ancestor-chain successfully rejected") # Check that prioritising a tx before it's added to the mempool works # First clear the mempool by mining a block. self.nodes[0].generate(1) sync_blocks(self.nodes) assert_equal(len(self.nodes[0].getrawmempool()), 0) # Prioritise a transaction that has been mined, then add it back to the # mempool by using invalidateblock. self.nodes[0].prioritisetransaction(chain[-1], 0, 2000) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Keep node1's tip synced with node0 self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash()) # Now check that the transaction is in the mempool, with the right # modified fee mempool = self.nodes[0].getrawmempool(True) descendant_fees = 0 for x in reversed(chain): descendant_fees += mempool[x]['fee'] if (x == chain[-1]): assert_equal( mempool[x]['modifiedfee'], mempool[x]['fee'] + satoshi_round(0.00002)) assert_equal( mempool[x]['descendantfees'], descendant_fees * COIN + 2000) # TODO: check that node1's mempool is as expected # TODO: test ancestor size limits # Now test descendant chain limits txid = utxo[1]['txid'] value = utxo[1]['amount'] vout = utxo[1]['vout'] transaction_package = [] # First create one parent tx with 10 children (txid, sent_value) = self.chain_transaction( self.nodes[0], txid, vout, value, fee, 10) parent_transaction = txid for i in range(10): transaction_package.append( {'txid': txid, 'vout': i, 'amount': sent_value}) for i in range(MAX_DESCENDANTS): utxo = transaction_package.pop(0) try: (txid, sent_value) = self.chain_transaction( self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) for j in range(10): transaction_package.append( {'txid': txid, 'vout': j, 'amount': sent_value}) if i == MAX_DESCENDANTS - 2: mempool = self.nodes[0].getrawmempool(True) assert_equal(mempool[parent_transaction][ 'descendantcount'], MAX_DESCENDANTS) except JSONRPCException as e: self.log.info(e.error['message']) assert_equal(i, MAX_DESCENDANTS - 1) self.log.info( "tx that would create too large descendant package successfully rejected") # TODO: check that node1's mempool is as expected # TODO: test descendant size limits # Test reorg handling # First, the basics: self.nodes[0].generate(1) sync_blocks(self.nodes) self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash()) self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash()) # Now test the case where node1 has a transaction T in its mempool that # depends on transactions A and B which are in a mined block, and the # block containing A and B is 
disconnected, AND B is not accepted back # into node1's mempool because its ancestor count is too high. # Create 8 transactions, like so: # Tx0 -> Tx1 (vout0) # \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7 # # Mine them in the next block, then generate a new tx8 that spends # Tx1 and Tx7, and add to node1's mempool, then disconnect the # last block. # Create tx0 with 2 outputs utxo = self.nodes[0].listunspent() txid = utxo[0]['txid'] value = utxo[0]['amount'] vout = utxo[0]['vout'] send_value = satoshi_round((value - fee) / 2) inputs = [{'txid': txid, 'vout': vout}] outputs = {} for i in range(2): outputs[self.nodes[0].getnewaddress()] = send_value rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransaction( rawtx, None, None, "ALL|FORKID") txid = self.nodes[0].sendrawtransaction(signedtx['hex']) tx0_id = txid value = send_value # Create tx1 (tx1_id, tx1_value) = self.chain_transaction( self.nodes[0], tx0_id, 0, value, fee, 1) # Create tx2-7 vout = 1 txid = tx0_id for i in range(6): (txid, sent_value) = self.chain_transaction( self.nodes[0], txid, vout, value, fee, 1) vout = 0 value = sent_value # Mine these in a block self.nodes[0].generate(1) self.sync_all() # Now generate tx8, with a big fee inputs = [{'txid': tx1_id, 'vout': 0}, {'txid': txid, 'vout': 0}] outputs = {self.nodes[0].getnewaddress(): send_value + value - 4 * fee} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransaction( rawtx, None, None, "ALL|FORKID") txid = self.nodes[0].sendrawtransaction(signedtx['hex']) sync_mempools(self.nodes) # Now try to disconnect the tip on each node... self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash()) self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) sync_blocks(self.nodes) if __name__ == '__main__': MempoolPackagesTest().main() diff --git a/qa/rpc-tests/mempool_reorg.py b/qa/rpc-tests/mempool_reorg.py index 316b644bf..0aeee8570 100755 --- a/qa/rpc-tests/mempool_reorg.py +++ b/qa/rpc-tests/mempool_reorg.py @@ -1,117 +1,109 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test re-org scenarios with a mempool that contains transactions # that spend (directly or indirectly) coinbase transactions. # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * # Create one-input, one-output, no-fee transaction: class MempoolCoinbaseTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = False + self.extra_args = [["-checkmempool"]] * self.num_nodes alert_filename = None # Set by setup_network - def setup_network(self): - args = ["-checkmempool"] - self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, args)) - self.nodes.append(start_node(1, self.options.tmpdir, args)) - connect_nodes(self.nodes[1], 0) - self.is_network_split = False - self.sync_all() - def run_test(self): start_count = self.nodes[0].getblockcount() # Mine four blocks. After this, nodes[0] blocks # 101, 102, and 103 are spend-able. new_blocks = self.nodes[1].generate(4) self.sync_all() node0_address = self.nodes[0].getnewaddress() node1_address = self.nodes[1].getnewaddress() # Three scenarios for re-orging coinbase spends in the memory pool: # 1. Direct coinbase spend : spend_101 # 2.
Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1 # 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1 # Use invalidateblock to make all of the above coinbase spends invalid (immature coinbase), # and make sure the mempool code behaves correctly. b = [self.nodes[0].getblockhash(n) for n in range(101, 105)] coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b] spend_101_raw = create_tx( self.nodes[0], coinbase_txids[1], node1_address, 49.99) spend_102_raw = create_tx( self.nodes[0], coinbase_txids[2], node0_address, 49.99) spend_103_raw = create_tx( self.nodes[0], coinbase_txids[3], node0_address, 49.99) # Create a block-height-locked transaction which will be invalid after # reorg timelock_tx = self.nodes[0].createrawtransaction( [{"txid": coinbase_txids[0], "vout": 0}], {node0_address: 49.99}) # Set the time lock: flip the input's nSequence away from ffffffff so # nLockTime is enforced, then write the target height into the trailing # 4-byte little-endian nLockTime field timelock_tx = timelock_tx.replace("ffffffff", "11111191", 1) timelock_tx = timelock_tx[:-8] + hex( self.nodes[0].getblockcount() + 2)[2:] + "000000" timelock_tx = self.nodes[0].signrawtransaction( timelock_tx, None, None, "ALL|FORKID")["hex"] assert_raises(JSONRPCException, self.nodes[ 0].sendrawtransaction, timelock_tx) # Broadcast and mine spend_102 and 103: spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw) spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw) self.nodes[0].generate(1) assert_raises(JSONRPCException, self.nodes[ 0].sendrawtransaction, timelock_tx) # Create 102_1 and 103_1: spend_102_1_raw = create_tx( self.nodes[0], spend_102_id, node1_address, 49.98) spend_103_1_raw = create_tx( self.nodes[0], spend_103_id, node1_address, 49.98) # Broadcast and mine 103_1: spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw) last_block = self.nodes[0].generate(1) timelock_tx_id = self.nodes[0].sendrawtransaction(timelock_tx) # ... now put spend_101 and spend_102_1 in memory pools: spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw) spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw) self.sync_all() assert_equal(set(self.nodes[0].getrawmempool()), { spend_101_id, spend_102_1_id, timelock_tx_id}) for node in self.nodes: node.invalidateblock(last_block[0]) assert_equal(set(self.nodes[0].getrawmempool()), { spend_101_id, spend_102_1_id, spend_103_1_id}) # Use invalidateblock to re-org back and make all those coinbase spends # immature/invalid: for node in self.nodes: node.invalidateblock(new_blocks[0]) self.sync_all() # mempool should be empty. assert_equal(set(self.nodes[0].getrawmempool()), set()) if __name__ == '__main__': MempoolCoinbaseTest().main() diff --git a/qa/rpc-tests/mempool_resurrect_test.py b/qa/rpc-tests/mempool_resurrect_test.py index 4d3038d2c..8a8952c2b 100755 --- a/qa/rpc-tests/mempool_resurrect_test.py +++ b/qa/rpc-tests/mempool_resurrect_test.py @@ -1,88 +1,84 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test resurrection of mined transactions when # the blockchain is re-organized.
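In outline, the resurrection behaviour exercised below is a single round trip; a hypothetical helper, not part of the test file:

# Hypothetical outline of the flow tested below: confirm transactions,
# disconnect the block, and expect them back in the mempool unconfirmed.
def check_resurrection(node, txids):
    block = node.generate(1)[0]        # confirm the transactions
    assert node.getrawmempool() == []
    node.invalidateblock(block)        # force the re-org
    assert set(node.getrawmempool()) == set(txids)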
# from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * # Create one-input, one-output, no-fee transaction: class MempoolCoinbaseTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.setup_clean_chain = False - def setup_network(self): # Just need one node for this test - args = ["-checkmempool"] - self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, args)) - self.is_network_split = False + self.extra_args = [["-checkmempool"]] def run_test(self): node0_address = self.nodes[0].getnewaddress() # Spend block 1/2/3's coinbase transactions # Mine a block. # Create three more transactions, spending the spends # Mine another block. # ... make sure all the transactions are confirmed # Invalidate both blocks # ... make sure all the transactions are put back in the mempool # Mine a new block # ... make sure all the transactions are confirmed again. b = [self.nodes[0].getblockhash(n) for n in range(1, 4)] coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b] spends1_raw = [create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids] spends1_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw] blocks = [] blocks.extend(self.nodes[0].generate(1)) spends2_raw = [create_tx(self.nodes[0], txid, node0_address, 49.98) for txid in spends1_id] spends2_id = [self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw] blocks.extend(self.nodes[0].generate(1)) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id + spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) # Use invalidateblock to re-org back; all transactions should # end up unconfirmed and back in the mempool for node in self.nodes: node.invalidateblock(blocks[0]) # mempool should now contain all the transactions again, none confirmed assert_equal( set(self.nodes[0].getrawmempool()), set(spends1_id + spends2_id)) for txid in spends1_id + spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] == 0) # Generate another block, they should all get mined self.nodes[0].generate(1) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id + spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) if __name__ == '__main__': MempoolCoinbaseTest().main() diff --git a/qa/rpc-tests/mempool_spendcoinbase.py b/qa/rpc-tests/mempool_spendcoinbase.py index 186679c89..0cb991233 100755 --- a/qa/rpc-tests/mempool_spendcoinbase.py +++ b/qa/rpc-tests/mempool_spendcoinbase.py @@ -1,67 +1,63 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test spending coinbase transactions. # The coinbase transaction in block N can appear in block # N+100... so is valid in the mempool when the best block # height is N+99. # This test makes sure coinbase spends that will be mature # in the next block are accepted into the memory pool, # but less mature coinbase spends are NOT.
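The maturity rule stated above, worked through with the heights this test actually uses:

# Worked example of the maturity window (heights from the test below):
# a coinbase mined at height N may be spent in block N+100, so its spend
# is already mempool-valid when the tip is at N+99.
N = 101                          # first coinbase the test spends
spendable_in_block = N + 100     # 201
mempool_valid_at_tip = N + 99    # 200, the height asserted at startup
assert (spendable_in_block, mempool_valid_at_tip) == (201, 200)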
# from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * # Create one-input, one-output, no-fee transaction: class MempoolSpendCoinbaseTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.setup_clean_chain = False - def setup_network(self): # Just need one node for this test - args = ["-checkmempool"] - self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, args)) - self.is_network_split = False + self.extra_args = [["-checkmempool"]] def run_test(self): chain_height = self.nodes[0].getblockcount() assert_equal(chain_height, 200) node0_address = self.nodes[0].getnewaddress() # Coinbase at height chain_height-100+1 ok in mempool, should # get mined. Coinbase at height chain_height-100+2 # is too immature to spend. b = [self.nodes[0].getblockhash(n) for n in range(101, 103)] coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b] spends_raw = [create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids] spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0]) # coinbase at height 102 should be too immature to spend assert_raises(JSONRPCException, self.nodes[ 0].sendrawtransaction, spends_raw[1]) # mempool should have just spend_101: assert_equal(self.nodes[0].getrawmempool(), [spend_101_id]) # mine a block, spend_101 should get confirmed self.nodes[0].generate(1) assert_equal(set(self.nodes[0].getrawmempool()), set()) # ... and now height 102 can be spent: spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1]) assert_equal(self.nodes[0].getrawmempool(), [spend_102_id]) if __name__ == '__main__': MempoolSpendCoinbaseTest().main() diff --git a/qa/rpc-tests/merkle_blocks.py b/qa/rpc-tests/merkle_blocks.py index 08164e4a8..09322b644 100755 --- a/qa/rpc-tests/merkle_blocks.py +++ b/qa/rpc-tests/merkle_blocks.py @@ -1,105 +1,99 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
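# The test that follows boils down to this RPC round trip (a sketch; `node`
# stands for a node that still has the relevant block data or runs -txindex):
#
#   proof = node.gettxoutproof([txid])             # hex-serialized merkle block
#   assert node.verifytxoutproof(proof) == [txid]  # proves txid was mined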
# # Test merkleblock fetch/validation # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class MerkleBlockTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 4 + # Nodes 0/1 are "wallet" nodes, Nodes 2/3 are used for testing + self.extra_args = [[], [], [], ["-txindex"]] def setup_network(self): - self.nodes = [] - # Nodes 0/1 are "wallet" nodes - self.nodes.append(start_node(0, self.options.tmpdir)) - self.nodes.append(start_node(1, self.options.tmpdir)) - # Nodes 2/3 are used for testing - self.nodes.append(start_node(2, self.options.tmpdir)) - self.nodes.append( - start_node(3, self.options.tmpdir, ["-txindex"])) + self.setup_nodes() connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[0], 2) connect_nodes(self.nodes[0], 3) - self.is_network_split = False self.sync_all() def run_test(self): self.log.info("Mining blocks...") self.nodes[0].generate(105) self.sync_all() chain_height = self.nodes[1].getblockcount() assert_equal(chain_height, 105) assert_equal(self.nodes[1].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 0) node0utxos = self.nodes[0].listunspent(1) tx1 = self.nodes[0].createrawtransaction( [node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99}) txid1 = self.nodes[0].sendrawtransaction( self.nodes[0].signrawtransaction(tx1, None, None, "ALL|FORKID")["hex"]) tx2 = self.nodes[0].createrawtransaction( [node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99}) txid2 = self.nodes[0].sendrawtransaction( self.nodes[0].signrawtransaction(tx2, None, None, "ALL|FORKID")["hex"]) assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1]) self.nodes[0].generate(1) blockhash = self.nodes[0].getblockhash(chain_height + 1) self.sync_all() txlist = [] blocktxn = self.nodes[0].getblock(blockhash, True)["tx"] txlist.append(blocktxn[1]) txlist.append(blocktxn[2]) assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid1])), [txid1]) assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid1, txid2])), txlist) assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist) txin_spent = self.nodes[1].listunspent(1).pop() tx3 = self.nodes[1].createrawtransaction( [txin_spent], {self.nodes[0].getnewaddress(): 49.98}) self.nodes[0].sendrawtransaction( self.nodes[1].signrawtransaction(tx3, None, None, "ALL|FORKID")["hex"]) self.nodes[0].generate(1) self.sync_all() txid_spent = txin_spent["txid"] txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2 # We can't find the block from a fully-spent tx assert_raises( JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent]) # ...but we can if we specify the block assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent]) # ...or if the first tx is not fully-spent assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent]) try: assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid1, txid2])), txlist) except JSONRPCException: assert_equal(self.nodes[2].verifytxoutproof( self.nodes[2].gettxoutproof([txid2, txid1])), txlist) # ...or if we have a -txindex assert_equal(self.nodes[2].verifytxoutproof( self.nodes[3].gettxoutproof([txid_spent])), [txid_spent]) if __name__ == '__main__': MerkleBlockTest().main() diff --git a/qa/rpc-tests/multi_rpc.py b/qa/rpc-tests/multi_rpc.py index b513bc95e..ee29fd067 
100755 --- a/qa/rpc-tests/multi_rpc.py +++ b/qa/rpc-tests/multi_rpc.py @@ -1,121 +1,118 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test multiple rpc user config option rpcauth # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import str_to_b64str, assert_equal import os import http.client import urllib.parse class HTTPBasicsTest (BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = False self.num_nodes = 1 def setup_chain(self): super().setup_chain() # Append rpcauth to bitcoin.conf before initialization rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144" rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e" with open(os.path.join(self.options.tmpdir + "/node0", "bitcoin.conf"), 'a', encoding='utf8') as f: f.write(rpcauth + "\n") f.write(rpcauth2 + "\n") - def setup_network(self): - self.nodes = self.setup_nodes() - def run_test(self): # # Check correctness of the rpcauth config option # # url = urllib.parse.urlparse(self.nodes[0].url) # Old authpair authpair = url.username + ':' + url.password # New authpair generated via share/rpcuser tool rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144" password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM=" # Second authpair with different username rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e" password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI=" authpairnew = "rt:" + password headers = {"Authorization": "Basic " + str_to_b64str(authpair)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status == 401, False) conn.close() # Use new authpair to confirm both work headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status == 401, False) conn.close() # Wrong login name with rt's password authpairnew = "rtwrong:" + password headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status == 401, True) conn.close() # Wrong password for rt authpairnew = "rt:" + password + "wrong" headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status == 401, True) conn.close() # Correct for rt2 authpairnew = "rt2:" + password2 headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status == 401, False) conn.close() # Wrong 
password for rt2 authpairnew = "rt2:" + password2 + "wrong" headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)} conn = http.client.HTTPConnection(url.hostname, url.port) conn.connect() conn.request('POST', '/', '{"method": "getbestblockhash"}', headers) resp = conn.getresponse() assert_equal(resp.status == 401, True) conn.close() if __name__ == '__main__': HTTPBasicsTest().main() diff --git a/qa/rpc-tests/nulldummy.py b/qa/rpc-tests/nulldummy.py index eef77c638..22942e805 100755 --- a/qa/rpc-tests/nulldummy.py +++ b/qa/rpc-tests/nulldummy.py @@ -1,132 +1,127 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import CTransaction, NetworkThread from test_framework.blocktools import create_coinbase, create_block from test_framework.script import CScript from io import BytesIO import time NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)" def trueDummy(tx): scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): assert(len(i) == 0) newscript.append(b'\x51') else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) tx.rehash() ''' This test is meant to exercise NULLDUMMY softfork. Connect to a single node. Generate 2 blocks (save the coinbases for later). Generate 427 more blocks. [Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. [Policy] Check that non-NULLDUMMY transactions are rejected before activation. [Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. [Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. 
''' class NULLDUMMYTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.setup_clean_chain = True - - def setup_network(self): - # Must set the blockversion for this test - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - extra_args=[['-whitelist=127.0.0.1', - '-walletprematurewitness']]) + self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']] def run_test(self): self.address = self.nodes[0].getnewaddress() self.ms_address = self.nodes[0].addmultisigaddress(1, [self.address]) NetworkThread().start() # Start up network handling in another thread self.coinbase_blocks = self.nodes[0].generate(2) # Block 2 coinbase_txid = [] for i in self.coinbase_blocks: coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) self.nodes[0].generate(427) # Block 429 self.lastblockhash = self.nodes[0].getbestblockhash() self.tip = int("0x" + self.lastblockhash, 0) self.lastblockheight = 429 self.lastblocktime = int(time.time()) + 429 self.log.info( "Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") test1txs = [self.create_transaction( self.nodes[0], coinbase_txid[0], self.ms_address, 49)] txid1 = self.tx_submit(self.nodes[0], test1txs[0]) test1txs.append(self.create_transaction( self.nodes[0], txid1, self.ms_address, 48)) txid2 = self.tx_submit(self.nodes[0], test1txs[1]) self.block_submit(self.nodes[0], test1txs, False, True) self.log.info( "Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") test2tx = self.create_transaction( self.nodes[0], txid2, self.ms_address, 48) trueDummy(test2tx) txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR) self.log.info( "Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") self.block_submit(self.nodes[0], [test2tx], False, True) def create_transaction(self, node, txid, to_address, amount): inputs = [{"txid": txid, "vout": 0}] outputs = {to_address: amount} rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID") tx = CTransaction() f = BytesIO(hex_str_to_bytes(signresult['hex'])) tx.deserialize(f) return tx def tx_submit(self, node, tx, msg=""): tx.rehash() try: node.sendrawtransaction( bytes_to_hex_str(tx.serialize_with_witness()), True) except JSONRPCException as exp: assert_equal(exp.error["message"], msg) else: assert_equal('', msg) return tx.hash def block_submit(self, node, txs, witness=False, accept=False): block = create_block(self.tip, create_coinbase( self.lastblockheight + 1), self.lastblocktime + 1) block.nVersion = 4 for tx in txs: tx.rehash() block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block.solve() node.submitblock(bytes_to_hex_str(block.serialize(True))) if (accept): assert_equal(node.getbestblockhash(), block.hash) self.tip = block.sha256 self.lastblockhash = block.hash self.lastblocktime += 1 self.lastblockheight += 1 else: assert_equal(node.getbestblockhash(), self.lastblockhash) if __name__ == '__main__': NULLDUMMYTest().main() diff --git a/qa/rpc-tests/p2p-acceptblock.py b/qa/rpc-tests/p2p-acceptblock.py index 45decf733..639ff63fa 100755 --- a/qa/rpc-tests/p2p-acceptblock.py +++ b/qa/rpc-tests/p2p-acceptblock.py @@ -1,296 +1,292 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or 
http://www.opensource.org/licenses/mit-license.php. from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time from test_framework.blocktools import create_block, create_coinbase ''' AcceptBlockTest -- test processing of unrequested blocks. Since behavior differs when receiving unrequested blocks from whitelisted peers versus non-whitelisted peers, this tests the behavior of both (effectively two separate tests running in parallel). Setup: two nodes, node0 and node1, not connected to each other. Node0 does not whitelist localhost, but node1 does. They will each be on their own chain for this test. We have one NodeConn connection to each, test_node and white_node respectively. The test: 1. Generate one block on each node, to leave IBD. 2. Mine a new block on each tip, and deliver to each node from node's peer. The tip should advance. 3. Mine a block that forks the previous block, and deliver to each node from corresponding peer. Node0 should not process this block (just accept the header), because it is unrequested and doesn't have more work than the tip. Node1 should process because this is coming from a whitelisted peer. 4. Send another block that builds on the forking block. Node0 should process this block but be stuck on the shorter chain, because it's missing an intermediate block. Node1 should reorg to this longer chain. 4b. Send 288 more blocks on the longer chain. Node0 should process all but the last block (too far ahead in height). Send all headers to Node1, and then send the last block in that chain. Node1 should accept the block because it's coming from a whitelisted peer. 5. Send a duplicate of the block in #3 to Node0. Node0 should not process the block because it is unrequested, and stay on the shorter chain. 6. Send Node0 an inv for the height 3 block produced in #4 above. Node0 should figure out that it is missing the height 2 block and send a getdata for it. 7. Send Node0 the missing block again. Node0 should process and the tip should advance. ''' # TestNode: bare-bones "peer". Used mostly as a conduit for a test to send # p2p messages to a node, generating the messages in the main testing logic. class TestNode(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.connection = None self.ping_counter = 1 self.last_pong = msg_pong() def add_connection(self, conn): self.connection = conn # Track the last getdata message we receive (used in the test) def on_getdata(self, conn, message): self.last_getdata = message # Spin until verack message is received from the node. # We use this to signal that our test can begin. This is called from the # testing thread, so it needs to acquire the global lock.
def wait_for_verack(self): while True: with mininode_lock: if self.verack_received: return time.sleep(0.05) # Wrapper for the NodeConn's send_message function def send_message(self, message): self.connection.send_message(message) def on_pong(self, conn, message): self.last_pong = message # Sync up with the node after delivery of a block def sync_with_ping(self, timeout=30): self.connection.send_message(msg_ping(nonce=self.ping_counter)) received_pong = False sleep_time = 0.05 while not received_pong and timeout > 0: time.sleep(sleep_time) timeout -= sleep_time with mininode_lock: if self.last_pong.nonce == self.ping_counter: received_pong = True self.ping_counter += 1 return received_pong class AcceptBlockTest(BitcoinTestFramework): def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("BITCOIND", "bitcoind"), help="bitcoind binary to test") def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 2 + self.extra_args = [[], ["-whitelist=127.0.0.1"]] def setup_network(self): # Node0 will be used to test behavior of processing unrequested blocks # from peers which are not whitelisted, while Node1 will be used for # the whitelisted case. - self.nodes = [] - self.nodes.append(start_node(0, self.options.tmpdir, - binary=self.options.testbinary)) - self.nodes.append(start_node(1, self.options.tmpdir, - ["-whitelist=127.0.0.1"], - binary=self.options.testbinary)) + self.setup_nodes() def run_test(self): # Setup the p2p connections and start up the network thread. test_node = TestNode() # connects to node0 (not whitelisted) white_node = TestNode() # connects to node1 (whitelisted) connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) connections.append( NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node)) test_node.add_connection(connections[0]) white_node.add_connection(connections[1]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here test_node.wait_for_verack() white_node.wait_for_verack() # 1. Have both nodes mine a block (leave IBD) [n.generate(1) for n in self.nodes] tips = [int("0x" + n.getbestblockhash(), 0) for n in self.nodes] # 2. Send one block that builds on each tip. # This should be accepted. blocks_h2 = [] # the height 2 blocks on each node's chain block_time = int(time.time()) + 1 for i in range(2): blocks_h2.append( create_block(tips[i], create_coinbase(2), block_time)) blocks_h2[i].solve() block_time += 1 test_node.send_message(msg_block(blocks_h2[0])) white_node.send_message(msg_block(blocks_h2[1])) [x.sync_with_ping() for x in [test_node, white_node]] assert_equal(self.nodes[0].getblockcount(), 2) assert_equal(self.nodes[1].getblockcount(), 2) self.log.info("First height 2 block accepted by both nodes") # 3. Send another block that builds on the original tip. 
blocks_h2f = [] # Blocks at height 2 that fork off the main chain for i in range(2): blocks_h2f.append( create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime + 1)) blocks_h2f[i].solve() test_node.send_message(msg_block(blocks_h2f[0])) white_node.send_message(msg_block(blocks_h2f[1])) [x.sync_with_ping() for x in [test_node, white_node]] for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h2f[0].hash: assert_equal(x['status'], "headers-only") for x in self.nodes[1].getchaintips(): if x['hash'] == blocks_h2f[1].hash: assert_equal(x['status'], "valid-headers") self.log.info( "Second height 2 block accepted only from whitelisted peer") # 4. Now send another block that builds on the forking chain. blocks_h3 = [] for i in range(2): blocks_h3.append( create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime + 1)) blocks_h3[i].solve() test_node.send_message(msg_block(blocks_h3[0])) white_node.send_message(msg_block(blocks_h3[1])) [x.sync_with_ping() for x in [test_node, white_node]] # Since the earlier block was not processed by node0, the new block # can't be fully validated. for x in self.nodes[0].getchaintips(): if x['hash'] == blocks_h3[0].hash: assert_equal(x['status'], "headers-only") # But this block should be accepted by node0 since it has more work. self.nodes[0].getblock(blocks_h3[0].hash) self.log.info( "Unrequested more-work block accepted from non-whitelisted peer") # Node1 should have accepted and reorged. assert_equal(self.nodes[1].getblockcount(), 3) self.log.info( "Successfully reorged to length 3 chain from whitelisted peer") # 4b. Now mine 288 more blocks and deliver; all should be processed but # the last (height-too-high) on node0. Node1 should process the tip if # we give it the headers chain leading to the tip. tips = blocks_h3 headers_message = msg_headers() all_blocks = [] # node0's blocks for j in range(2): for i in range(288): next_block = create_block( tips[j].sha256, create_coinbase(i + 4), tips[j].nTime + 1) next_block.solve() if j == 0: test_node.send_message(msg_block(next_block)) all_blocks.append(next_block) else: headers_message.headers.append(CBlockHeader(next_block)) tips[j] = next_block time.sleep(2) # Blocks 1-287 should be accepted, block 288 should be ignored because # it's too far ahead for x in all_blocks[:-1]: self.nodes[0].getblock(x.hash) assert_raises_jsonrpc( -1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash) headers_message.headers.pop() # Ensure the last block is unrequested white_node.send_message(headers_message) # Send headers leading to tip white_node.send_message(msg_block(tips[1])) # Now deliver the tip white_node.sync_with_ping() self.nodes[1].getblock(tips[1].hash) self.log.info( "Unrequested block far ahead of tip accepted from whitelisted peer") # 5. Test handling of unrequested block on the node that didn't process # Should still not be processed (even though it has a child that has more # work). test_node.send_message(msg_block(blocks_h2f[0])) # Here, if the sleep is too short, the test could falsely succeed (if the # node hasn't processed the block by the time the sleep returns, and then # the node processes it and incorrectly advances the tip). # But this would be caught later on, when we verify that an inv triggers # a getdata request for this block. test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 2) self.log.info( "Unrequested block that would complete more-work chain was ignored") # 6. Try to get node to request the missing block. 
# Poke the node with an inv for block at height 3 and see if that # triggers a getdata on block 2 (it should if block 2 is missing). with mininode_lock: # Clear state so we can check the getdata request test_node.last_getdata = None test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)])) test_node.sync_with_ping() with mininode_lock: getdata = test_node.last_getdata # Check that the getdata includes the right block assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256) self.log.info("Inv at tip triggered getdata for unprocessed block") # 7. Send the missing block for the third time (now it is requested) test_node.send_message(msg_block(blocks_h2f[0])) test_node.sync_with_ping() assert_equal(self.nodes[0].getblockcount(), 290) self.log.info( "Successfully reorged to longer chain from non-whitelisted peer") [c.disconnect_node() for c in connections] if __name__ == '__main__': AcceptBlockTest().main() diff --git a/qa/rpc-tests/p2p-compactblocks.py b/qa/rpc-tests/p2p-compactblocks.py index dea9a3cdc..db5b7cd89 100755 --- a/qa/rpc-tests/p2p-compactblocks.py +++ b/qa/rpc-tests/p2p-compactblocks.py @@ -1,960 +1,953 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment from test_framework.siphash import siphash256 from test_framework.script import CScript, OP_TRUE ''' CompactBlocksTest -- test compact blocks (BIP 152) Only testing Version 1 compact blocks (txids) ''' # TestNode: A peer we use to send messages to bitcoind, and store responses. class TestNode(SingleNodeConnCB): def __init__(self): SingleNodeConnCB.__init__(self) self.last_sendcmpct = [] self.last_headers = None self.last_inv = None self.last_cmpctblock = None self.block_announced = False self.last_getdata = None self.last_getheaders = None self.last_getblocktxn = None self.last_block = None self.last_blocktxn = None # Store the hashes of blocks we've seen announced. # This is for synchronizing the p2p message traffic, # so we can eg wait until a particular block is announced. 
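# Typical use (a sketch): after node.generate(1), a test calls
#   peer.wait_for_block_announcement(int(new_hash, 16), timeout=30)
# which polls set_announced_blockhashes until the hash arrives; on_inv,
# on_headers and on_cmpctblock below all record announced hashes, so any
# announcement style satisfies the wait.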
self.set_announced_blockhashes = set() def on_sendcmpct(self, conn, message): self.last_sendcmpct.append(message) def on_block(self, conn, message): self.last_block = message def on_cmpctblock(self, conn, message): self.last_cmpctblock = message self.block_announced = True self.last_cmpctblock.header_and_shortids.header.calc_sha256() self.set_announced_blockhashes.add( self.last_cmpctblock.header_and_shortids.header.sha256) def on_headers(self, conn, message): self.last_headers = message self.block_announced = True for x in self.last_headers.headers: x.calc_sha256() self.set_announced_blockhashes.add(x.sha256) def on_inv(self, conn, message): self.last_inv = message for x in self.last_inv.inv: if x.type == 2: self.block_announced = True self.set_announced_blockhashes.add(x.hash) def on_getdata(self, conn, message): self.last_getdata = message def on_getheaders(self, conn, message): self.last_getheaders = message def on_getblocktxn(self, conn, message): self.last_getblocktxn = message def on_blocktxn(self, conn, message): self.last_blocktxn = message # Requires caller to hold mininode_lock def received_block_announcement(self): return self.block_announced def clear_block_announcement(self): with mininode_lock: self.block_announced = False self.last_inv = None self.last_headers = None self.last_cmpctblock = None def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.connection.send_message(msg) def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [CBlockHeader(b) for b in new_blocks] self.send_message(headers_message) def request_headers_and_sync(self, locator, hashstop=0): self.clear_block_announcement() self.get_headers(locator, hashstop) assert(wait_until(self.received_block_announcement, timeout=30)) assert(self.received_block_announcement()) self.clear_block_announcement() # Block until a block announcement for a particular block hash is # received. def wait_for_block_announcement(self, block_hash, timeout=30): def received_hash(): return (block_hash in self.set_announced_blockhashes) return wait_until(received_hash, timeout=timeout) class CompactBlocksTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 2 + self.extra_args = [[], ["-txindex"]] self.utxos = [] - def setup_network(self): - self.nodes = [] - - # Start up two version 1 CB nodes. - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, - [[], ["-txindex"]]) - connect_nodes(self.nodes[0], 1) - def build_block_on_tip(self, node): height = node.getblockcount() tip = node.getbestblockhash() mtp = node.getblockheader(tip)['mediantime'] block = create_block( int(tip, 16), create_coinbase(height + 1), mtp + 1) block.nVersion = 4 block.solve() return block # Create 10 more anyone-can-spend utxo's for testing. def make_utxos(self): # Doesn't matter which node we use, just use node0. 
block = self.build_block_on_tip(self.nodes[0]) self.test_node.send_and_ping(msg_block(block)) assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256) self.nodes[0].generate(100) total_value = block.vtx[0].vout[0].nValue out_value = total_value // 10 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) for i in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() block2 = self.build_block_on_tip(self.nodes[0]) block2.vtx.append(tx) block2.hashMerkleRoot = block2.calc_merkle_root() block2.solve() self.test_node.send_and_ping(msg_block(block2)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256) self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)]) return # Test "sendcmpct" (between peers preferring the same version): # - No compact block announcements unless sendcmpct is sent. # - If sendcmpct is sent with version > preferred_version, the message is ignored. # - If sendcmpct is sent with boolean 0, then block announcements are not # made with compact blocks. # - If sendcmpct is then sent with boolean 1, then new block announcements # are made with compact blocks. # If old_node is passed in, request compact blocks with version=preferred-1 # and verify that it receives block announcements via compact block. def test_sendcmpct(self, node, test_node, preferred_version, old_node=None): # Make sure we get a SENDCMPCT message from our peer def received_sendcmpct(): return (len(test_node.last_sendcmpct) > 0) got_message = wait_until(received_sendcmpct, timeout=30) assert(received_sendcmpct()) assert(got_message) with mininode_lock: # Check that the first version received is the preferred one assert_equal( test_node.last_sendcmpct[0].version, preferred_version) # And that we receive versions down to 1. assert_equal(test_node.last_sendcmpct[-1].version, 1) test_node.last_sendcmpct = [] tip = int(node.getbestblockhash(), 16) def check_announcement_of_new_block(node, peer, predicate): peer.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) peer.wait_for_block_announcement(block_hash, timeout=30) assert(peer.block_announced) assert(got_message) with mininode_lock: assert predicate(peer), ( "block_hash={!r}, cmpctblock={!r}, inv={!r}".format( block_hash, peer.last_cmpctblock, peer.last_inv)) # We shouldn't get any block announcements via cmpctblock yet. check_announcement_of_new_block( node, test_node, lambda p: p.last_cmpctblock is None) # Try one more time, this time after requesting headers. test_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None) # Test a few ways of using sendcmpct that should NOT # result in compact block announcements. # Before each test, sync the headers chain. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with too-high version sendcmpct = msg_sendcmpct() sendcmpct.version = 999 # was: preferred_version+1 sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: p.last_cmpctblock is None) # Headers sync before next test. test_node.request_headers_and_sync(locator=[tip]) # Now try a SENDCMPCT message with valid version, but announce=False sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: p.last_cmpctblock is None) # Headers sync before next test. 
test_node.request_headers_and_sync(locator=[tip]) # Finally, try a SENDCMPCT message with announce=True sendcmpct.version = preferred_version sendcmpct.announce = True test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: p.last_cmpctblock is not None) # Try one more time (no headers sync should be needed!) check_announcement_of_new_block( node, test_node, lambda p: p.last_cmpctblock is not None) # Try one more time, after turning on sendheaders test_node.send_and_ping(msg_sendheaders()) check_announcement_of_new_block( node, test_node, lambda p: p.last_cmpctblock is not None) # Try one more time, after sending a version-1, announce=false message. sendcmpct.version = preferred_version - 1 sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: p.last_cmpctblock is not None) # Now turn off announcements sendcmpct.version = preferred_version sendcmpct.announce = False test_node.send_and_ping(sendcmpct) check_announcement_of_new_block( node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None) if old_node is not None: # Verify that a peer using an older protocol version can receive # announcements from this node. sendcmpct.version = 1 # preferred_version-1 sendcmpct.announce = True old_node.send_and_ping(sendcmpct) # Header sync old_node.request_headers_and_sync(locator=[tip]) check_announcement_of_new_block( node, old_node, lambda p: p.last_cmpctblock is not None) # This test actually causes bitcoind to (reasonably!) disconnect us, so do # this last. def test_invalid_cmpctblock_message(self): self.nodes[0].generate(101) block = self.build_block_on_tip(self.nodes[0]) cmpct_block = P2PHeaderAndShortIDs() cmpct_block.header = CBlockHeader(block) cmpct_block.prefilled_txn_length = 1 # This index will be too high prefilled_txn = PrefilledTransaction(1, block.vtx[0]) cmpct_block.prefilled_txn = [prefilled_txn] self.test_node.send_and_ping(msg_cmpctblock(cmpct_block)) assert( int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock) # Compare the generated shortids to what we expect based on BIP 152, given # bitcoind's choice of nonce. def test_compactblock_construction(self, node, test_node): # Generate a bunch of transactions. node.generate(101) num_transactions = 25 address = node.getnewaddress() for i in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) assert(tx.wit.is_null()) # Wait until we've seen the block announcement for the resulting tip tip = int(node.getbestblockhash(), 16) assert(test_node.wait_for_block_announcement(tip)) # Make sure we will receive a fast-announce compact block self.request_cb_announcements(test_node, node) # Now mine a block, and look at the resulting compact block. test_node.clear_block_announcement() block_hash = int(node.generate(1)[0], 16) # Store the raw block in our internal format. 
block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False)) # zero-pad to the full 64-char hash; a plain "%x" could drop leading zeroes [tx.calc_sha256() for tx in block.vtx] block.rehash() # Wait until the block was announced (via compact blocks) wait_until(test_node.received_block_announcement, timeout=30) assert(test_node.received_block_announcement()) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert(test_node.last_cmpctblock is not None) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_cmpctblock.header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) # Now fetch the compact block using a normal non-announce getdata with mininode_lock: test_node.clear_block_announcement() inv = CInv(4, block_hash) # 4 == "CompactBlock" test_node.send_message(msg_getdata([inv])) wait_until(test_node.received_block_announcement, timeout=30) assert(test_node.received_block_announcement()) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: assert(test_node.last_cmpctblock is not None) # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs( test_node.last_cmpctblock.header_and_shortids) self.check_compactblock_construction_from_block( header_and_shortids, block_hash, block) def check_compactblock_construction_from_block(self, header_and_shortids, block_hash, block): # Check that we got the right block! header_and_shortids.header.calc_sha256() assert_equal(header_and_shortids.header.sha256, block_hash) # Make sure the prefilled_txn appears to have included the coinbase assert(len(header_and_shortids.prefilled_txn) >= 1) assert_equal(header_and_shortids.prefilled_txn[0].index, 0) # Check that all prefilled_txn entries match what's in the block. for entry in header_and_shortids.prefilled_txn: entry.tx.calc_sha256() # This checks the non-witness parts of the tx agree assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256) # And this checks the witness wtxid = entry.tx.calc_sha256(True) # Shouldn't have received a witness assert(entry.tx.wit.is_null()) # Check that the cmpctblock message announced all the transactions. assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx)) # And now check that all the shortids are as expected as well. # Determine the siphash keys to use. [k0, k1] = header_and_shortids.get_siphash_keys() index = 0 while index < len(block.vtx): if (len(header_and_shortids.prefilled_txn) > 0 and header_and_shortids.prefilled_txn[0].index == index): # Already checked prefilled transactions above header_and_shortids.prefilled_txn.pop(0) else: tx_hash = block.vtx[index].sha256 shortid = calculate_shortid(k0, k1, tx_hash) assert_equal(shortid, header_and_shortids.shortids[0]) header_and_shortids.shortids.pop(0) index += 1 # Test that bitcoind requests compact blocks when we announce new blocks # via header or inv, and that responding to getblocktxn causes the block # to be successfully reconstructed.
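# The BIP 152 round trip this method drives, schematically (a sketch):
#   peer -> node: headers or inv            (announce a new block)
#   node -> peer: getdata(CInv(4, hash))    (4 == compact block)
#   peer -> node: cmpctblock                (coinbase sent only as a shortid)
#   node -> peer: getblocktxn [0]           (node requests the missing coinbase)
#   peer -> node: blocktxn                  (node reconstructs; tip advances)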
def test_compactblock_requests(self, node, test_node, version): # Try announcing a block with an inv or header, expect a compactblock # request for announce in ["inv", "header"]: block = self.build_block_on_tip(node) with mininode_lock: test_node.last_getdata = None if announce == "inv": test_node.send_message(msg_inv([CInv(2, block.sha256)])) success = wait_until( lambda: test_node.last_getheaders is not None, timeout=30) assert(success) test_node.send_header_for_blocks([block]) else: test_node.send_header_for_blocks([block]) success = wait_until( lambda: test_node.last_getdata is not None, timeout=30) assert(success) assert_equal(len(test_node.last_getdata.inv), 1) assert_equal(test_node.last_getdata.inv[0].type, 4) assert_equal(test_node.last_getdata.inv[0].hash, block.sha256) # Send back a compactblock message that omits the coinbase comp_block = HeaderAndShortIDs() comp_block.header = CBlockHeader(block) comp_block.nonce = 0 [k0, k1] = comp_block.get_siphash_keys() coinbase_hash = block.vtx[0].sha256 if version == 2: coinbase_hash = block.vtx[0].calc_sha256(True) comp_block.shortids = [ calculate_shortid(k0, k1, coinbase_hash)] test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # Expect a getblocktxn message. with mininode_lock: assert(test_node.last_getblocktxn is not None) absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute( ) assert_equal(absolute_indexes, [0]) # should be a coinbase request # Send the coinbase, and verify that the tip advances. if version == 2: msg = msg_witness_blocktxn() else: msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = [block.vtx[0]] test_node.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) # Create a chain of transactions from given utxo, and add to a new block. def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) for i in range(num_transactions): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE]))) tx.rehash() utxo = [tx.sha256, 0, tx.vout[0].nValue] block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.solve() return block # Test that we only receive getblocktxn requests for transactions that the # node needs, and that responding to them causes the block to be # reconstructed. def test_getblocktxn_requests(self, node, test_node, version): with_witness = (version == 2) def test_getblocktxn_response(compact_block, peer, expected_result): msg = msg_cmpctblock(compact_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert(peer.last_getblocktxn is not None) absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute( ) assert_equal(absolute_indexes, expected_result) def test_tip_after_message(node, peer, msg, tip): peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), tip) # First try announcing compactblocks that won't reconstruct, and verify # that we receive getblocktxn messages back. 
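# (BIP 152 detail, a sketch: getblocktxn indexes are differentially encoded on
# the wire, so the absolute indexes [1, 2, 3, 4, 5] travel as [1, 0, 0, 0, 0];
# BlockTransactionsRequest.to_absolute() undoes that encoding before we
# compare against expected_result.)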
utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block, use_witness=with_witness) test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5]) msg_bt = msg_blocktxn() if with_witness: msg_bt = msg_witness_blocktxn() # serialize with witnesses msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[1:]) test_tip_after_message(node, test_node, msg_bt, block.sha256) utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now try interspersing the prefilled transactions comp_block.initialize_from_block( block, prefill_list=[0, 1, 5], use_witness=with_witness) test_getblocktxn_response(comp_block, test_node, [2, 3, 4]) msg_bt.block_transactions = BlockTransactions( block.sha256, block.vtx[2:5]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now try giving one transaction ahead of time. utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) test_node.send_and_ping(msg_tx(block.vtx[1])) assert(block.vtx[1].hash in node.getrawmempool()) # Prefill 4 out of the 6 transactions, and verify that only the one # that was not in the mempool is requested. comp_block.initialize_from_block( block, prefill_list=[0, 2, 3, 4], use_witness=with_witness) test_getblocktxn_response(comp_block, test_node, [5]) msg_bt.block_transactions = BlockTransactions( block.sha256, [block.vtx[5]]) test_tip_after_message(node, test_node, msg_bt, block.sha256) # Now provide all transactions to the node before the block is # announced and verify reconstruction happens immediately. utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) for tx in block.vtx[1:]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) # Clear out last request. with mininode_lock: test_node.last_getblocktxn = None # Send compact block comp_block.initialize_from_block( block, prefill_list=[0], use_witness=with_witness) test_tip_after_message( node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256) with mininode_lock: # Shouldn't have gotten a request for any transaction assert(test_node.last_getblocktxn is None) # Incorrectly responding to a getblocktxn shouldn't cause the block to be # permanently failed. def test_incorrect_blocktxn_response(self, node, test_node, version): if (len(self.utxos) == 0): self.make_utxos() utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Relay the first 5 transactions from the block in advance for tx in block.vtx[1:6]: test_node.send_message(msg_tx(tx)) test_node.sync_with_ping() # Make sure all transactions were accepted. 
mempool = node.getrawmempool() for tx in block.vtx[1:6]: assert(tx.hash in mempool) # Send compact block comp_block = HeaderAndShortIDs() comp_block.initialize_from_block( block, prefill_list=[0], use_witness=(version == 2)) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) absolute_indexes = [] with mininode_lock: assert(test_node.last_getblocktxn is not None) absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute( ) assert_equal(absolute_indexes, [6, 7, 8, 9, 10]) # Now give an incorrect response. # Note that it's possible for bitcoind to be smart enough to know we're # lying, since it could check to see if the shortid matches what we're # sending, and eg disconnect us for misbehavior. If that behavior # change were made, we could just modify this test by having a # different peer provide the block further down, so that we're still # verifying that the block isn't marked bad permanently. This is good # enough for now. msg = msg_blocktxn() if version == 2: msg = msg_witness_blocktxn() msg.block_transactions = BlockTransactions( block.sha256, [block.vtx[5]] + block.vtx[7:]) test_node.send_and_ping(msg) # Tip should not have updated assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) # We should receive a getdata request success = wait_until( lambda: test_node.last_getdata is not None, timeout=10) assert(success) assert_equal(len(test_node.last_getdata.inv), 1) assert(test_node.last_getdata.inv[ 0].type == 2 or test_node.last_getdata.inv[0].type == 2 | MSG_WITNESS_FLAG) assert_equal(test_node.last_getdata.inv[0].hash, block.sha256) # Deliver the block if version == 2: test_node.send_and_ping(msg_witness_block(block)) else: test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def test_getblocktxn_handler(self, node, test_node, version): # bitcoind will not send blocktxn responses for blocks whose height is # more than 10 blocks deep. MAX_GETBLOCKTXN_DEPTH = 10 chain_height = node.getblockcount() current_height = chain_height while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): block_hash = node.getblockhash(current_height) block = FromHex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), []) num_to_request = random.randint(1, len(block.vtx)) msg.block_txn_request.from_absolute( sorted(random.sample(range(len(block.vtx)), num_to_request))) test_node.send_message(msg) success = wait_until( lambda: test_node.last_blocktxn is not None, timeout=10) assert(success) [tx.calc_sha256() for tx in block.vtx] with mininode_lock: assert_equal( test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16)) all_indices = msg.block_txn_request.to_absolute() for index in all_indices: tx = test_node.last_blocktxn.block_transactions.transactions.pop( 0) tx.calc_sha256() assert_equal(tx.sha256, block.vtx[index].sha256) if version == 1: # Witnesses should have been stripped assert(tx.wit.is_null()) else: # Check that the witness matches assert_equal( tx.calc_sha256(True), block.vtx[index].calc_sha256(True)) test_node.last_blocktxn = None current_height -= 1 # Next request should send a full block response, as we're past the # allowed depth for a blocktxn response. 
block_hash = node.getblockhash(current_height) msg.block_txn_request = BlockTransactionsRequest( int(block_hash, 16), [0]) with mininode_lock: test_node.last_block = None test_node.last_blocktxn = None test_node.send_and_ping(msg) with mininode_lock: test_node.last_block.block.calc_sha256() assert_equal( test_node.last_block.block.sha256, int(block_hash, 16)) assert_equal(test_node.last_blocktxn, None) def test_compactblocks_not_at_tip(self, node, test_node): # Test that requesting old compactblocks doesn't work. MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] for i in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) wait_until(test_node.received_block_announcement, timeout=30) test_node.clear_block_announcement() test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) success = wait_until( lambda: test_node.last_cmpctblock is not None, timeout=30) assert(success) test_node.clear_block_announcement() node.generate(1) wait_until(test_node.received_block_announcement, timeout=30) test_node.clear_block_announcement() with mininode_lock: test_node.last_block = None test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))])) success = wait_until( lambda: test_node.last_block is not None, timeout=30) assert(success) with mininode_lock: test_node.last_block.block.calc_sha256() assert_equal( test_node.last_block.block.sha256, int(new_blocks[0], 16)) # Generate an old compactblock, and verify that it's not accepted. cur_height = node.getblockcount() hashPrevBlock = int(node.getblockhash(cur_height - 5), 16) block = self.build_block_on_tip(node) block.hashPrevBlock = hashPrevBlock block.solve() comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(block) test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) tips = node.getchaintips() found = False for x in tips: if x["hash"] == block.hash: assert_equal(x["status"], "headers-only") found = True break assert(found) # Requesting this block via getblocktxn should silently fail # (to avoid fingerprinting attacks). msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0]) with mininode_lock: test_node.last_blocktxn = None test_node.send_and_ping(msg) with mininode_lock: assert(test_node.last_blocktxn is None) def test_end_to_end_block_relay(self, node, listeners): utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 10) [l.clear_block_announcement() for l in listeners] node.submitblock(ToHex(block)) for l in listeners: wait_until(lambda: l.received_block_announcement(), timeout=30) with mininode_lock: for l in listeners: assert(l.last_cmpctblock is not None) l.last_cmpctblock.header_and_shortids.header.calc_sha256() assert_equal( l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256) # Test that we don't get disconnected if we relay a compact block with valid header, # but invalid transactions. def test_invalid_tx_in_compactblock(self, node, test_node): assert(len(self.utxos)) utxo = self.utxos[0] block = self.build_block_with_transactions(node, utxo, 5) del block.vtx[3] block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Now send the compact block with all transactions prefilled, and # verify that we don't get disconnected. 
comp_block = HeaderAndShortIDs() comp_block.initialize_from_block( block, prefill_list=[0, 1, 2, 3, 4], use_witness=False) msg = msg_cmpctblock(comp_block.to_p2p()) test_node.send_and_ping(msg) # Check that the tip didn't advance assert(int(node.getbestblockhash(), 16) != block.sha256) test_node.sync_with_ping() # Helper for enabling cb announcements # Send the sendcmpct request and sync headers def request_cb_announcements(self, peer, node, version=1): tip = node.getbestblockhash() peer.get_headers(locator=[int(tip, 16)], hashstop=0) msg = msg_sendcmpct() msg.version = version msg.announce = True peer.send_and_ping(msg) def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer): assert(len(self.utxos)) def announce_cmpct_block(node, peer): utxo = self.utxos.pop(0) block = self.build_block_with_transactions(node, utxo, 5) cmpct_block = HeaderAndShortIDs() cmpct_block.initialize_from_block(block) msg = msg_cmpctblock(cmpct_block.to_p2p()) peer.send_and_ping(msg) with mininode_lock: assert(peer.last_getblocktxn is not None) return block, cmpct_block block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() mempool = node.getrawmempool() for tx in block.vtx[1:]: assert(tx.hash in mempool) delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert_equal(int(node.getbestblockhash(), 16), block.sha256) self.utxos.append( [block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue]) # Now test that delivering an invalid compact block won't break relay block, cmpct_block = announce_cmpct_block(node, stalling_peer) for tx in block.vtx[1:]: delivery_peer.send_message(msg_tx(tx)) delivery_peer.sync_with_ping() cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [CTxInWitness()] cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[ 0].scriptWitness.stack = [ser_uint256(0)] cmpct_block.use_witness = True delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p())) assert(int(node.getbestblockhash(), 16) != block.sha256) msg = msg_blocktxn() msg.block_transactions.blockhash = block.sha256 msg.block_transactions.transactions = block.vtx[1:] stalling_peer.send_and_ping(msg) assert_equal(int(node.getbestblockhash(), 16), block.sha256) def run_test(self): # Setup the p2p connections and start up the network thread. self.test_node = TestNode() self.ex_softfork_node = TestNode() self.old_node = TestNode() # version 1 peer connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.ex_softfork_node, services=NODE_NETWORK)) connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.old_node, services=NODE_NETWORK)) self.test_node.add_connection(connections[0]) self.ex_softfork_node.add_connection(connections[1]) self.old_node.add_connection(connections[2]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here self.test_node.wait_for_verack() # We will need UTXOs to construct transactions in later tests. self.make_utxos() self.log.info("Running tests:") self.log.info("\tTesting SENDCMPCT p2p message... 
") self.test_sendcmpct(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_sendcmpct( self.nodes[1], self.ex_softfork_node, 1, old_node=self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting compactblock construction...") self.test_compactblock_construction(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblock_construction( self.nodes[1], self.ex_softfork_node) sync_blocks(self.nodes) self.log.info("\tTesting compactblock requests... ") self.test_compactblock_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_compactblock_requests( self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) self.log.info("\tTesting getblocktxn requests...") self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_requests(self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) self.log.info("\tTesting getblocktxn handler...") self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_getblocktxn_handler(self.nodes[1], self.ex_softfork_node, 2) self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1) sync_blocks(self.nodes) self.log.info( "\tTesting compactblock requests/announcements not at chain tip...") self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node) sync_blocks(self.nodes) self.test_compactblocks_not_at_tip( self.nodes[1], self.ex_softfork_node) self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting handling of incorrect blocktxn responses...") self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1) sync_blocks(self.nodes) self.test_incorrect_blocktxn_response( self.nodes[1], self.ex_softfork_node, 2) sync_blocks(self.nodes) # End-to-end block relay tests self.log.info("\tTesting end-to-end block relay...") self.request_cb_announcements(self.test_node, self.nodes[0]) self.request_cb_announcements(self.old_node, self.nodes[1]) self.request_cb_announcements( self.ex_softfork_node, self.nodes[1], version=2) self.test_end_to_end_block_relay( self.nodes[0], [self.ex_softfork_node, self.test_node, self.old_node]) self.test_end_to_end_block_relay( self.nodes[1], [self.ex_softfork_node, self.test_node, self.old_node]) self.log.info("\tTesting handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node) self.test_invalid_tx_in_compactblock( self.nodes[1], self.ex_softfork_node) self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node) self.log.info( "\tTesting reconstructing compact blocks from all peers...") self.test_compactblock_reconstruction_multiple_peers( self.nodes[1], self.ex_softfork_node, self.old_node) sync_blocks(self.nodes) self.log.info("\tTesting invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() if __name__ == '__main__': CompactBlocksTest().main() diff --git a/qa/rpc-tests/p2p-feefilter.py b/qa/rpc-tests/p2p-feefilter.py index 7d4607ff3..1cb5bb616 100755 --- a/qa/rpc-tests/p2p-feefilter.py +++ b/qa/rpc-tests/p2p-feefilter.py @@ -1,130 +1,119 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#

from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time

'''
FeeFilterTest -- test processing of feefilter messages
'''


def hashToHex(hash):
    return format(hash, '064x')


# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
    for x in range(60):
        with mininode_lock:
            if (sorted(invsExpected) == sorted(testnode.txinvs)):
                return True
        time.sleep(1)
    return False


# TestNode: bare-bones "peer". Used to track which invs are received from a node
# and to send the node feefilter messages.
class TestNode(SingleNodeConnCB):

    def __init__(self):
        SingleNodeConnCB.__init__(self)
        self.txinvs = []

    def on_inv(self, conn, message):
        for i in message.inv:
            if (i.type == 1):
                self.txinvs.append(hashToHex(i.hash))

    def clear_invs(self):
        with mininode_lock:
            self.txinvs = []

    def send_filter(self, feerate):
        self.send_message(msg_feefilter(feerate))
        self.sync_with_ping()


class FeeFilterTest(BitcoinTestFramework):

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = False

-    def setup_network(self):
-        # Node1 will be used to generate txs which should be relayed from Node0
-        # to our test node
-        self.nodes = []
-        extra_args = ["-logtimemicros"]
-        self.nodes.append(
-            start_node(0, self.options.tmpdir, extra_args))
-        self.nodes.append(
-            start_node(1, self.options.tmpdir, extra_args))
-        connect_nodes(self.nodes[0], 1)
-
    def run_test(self):
        node1 = self.nodes[1]
        node0 = self.nodes[0]
        # Get out of IBD
        node1.generate(1)
        sync_blocks(self.nodes)

        # Setup the p2p connections and start up the network thread.
        test_node = TestNode()
        connection = NodeConn(
            '127.0.0.1', p2p_port(0), self.nodes[0], test_node)
        test_node.add_connection(connection)
        NetworkThread().start()
        test_node.wait_for_verack()

        # Test that invs are received for all txs at feerate of 20 sat/byte
        node1.settxfee(Decimal("0.00020000"))
        txids = [node1.sendtoaddress(node1.getnewaddress(), 1)
                 for x in range(3)]
        assert(allInvsMatch(txids, test_node))
        test_node.clear_invs()

        # Set a filter of 15 sat/byte
        test_node.send_filter(15000)

        # Test that txs are still being received (paying 20 sat/byte)
        txids = [node1.sendtoaddress(node1.getnewaddress(), 1)
                 for x in range(3)]
        assert(allInvsMatch(txids, test_node))
        test_node.clear_invs()

        # Change tx fee rate to 10 sat/byte and test they are no longer
        # received
        node1.settxfee(Decimal("0.00010000"))
        [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
        sync_mempools(self.nodes)  # must be sure node 0 has received all txs

        # Send one transaction from node0 that should be received, so that
        # we can sync the test on receipt (if node1's txs were relayed, they'd
        # be received by the time this node0 tx is received). This is
        # unfortunately reliant on the current relay behavior where we batch up
        # to 35 entries in an inv, which means that when this next transaction
        # is eligible for relay, the prior transactions from node1 are eligible
        # as well.
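        # (Editor's sketch) A hypothetical, more explicit form of the barrier
        # used below -- waiting on the inv of one known marker txid rather
        # than relying on inv batching; `wait_until` comes from
        # test_framework.util:
        #
        # def wait_for_marker_inv(peer, marker_txid, timeout=60):
        #     def seen():
        #         with mininode_lock:
        #             return marker_txid in peer.txinvs
        #     return wait_until(seen, timeout=timeout)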
        node0.settxfee(Decimal("0.00020000"))
        txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
        assert(allInvsMatch(txids, test_node))
        test_node.clear_invs()

        # Remove fee filter and check that txs are received again
        test_node.send_filter(0)
        txids = [node1.sendtoaddress(node1.getnewaddress(), 1)
                 for x in range(3)]
        assert(allInvsMatch(txids, test_node))
        test_node.clear_invs()


if __name__ == '__main__':
    FeeFilterTest().main()
diff --git a/qa/rpc-tests/p2p-leaktests.py b/qa/rpc-tests/p2p-leaktests.py
index e6ec6c5e2..1322d568c 100755
--- a/qa/rpc-tests/p2p-leaktests.py
+++ b/qa/rpc-tests/p2p-leaktests.py
@@ -1,188 +1,183 @@
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

'''
Test for message sending before handshake completion

A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.

This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't.
'''

banscore = 10


class CLazyNode(NodeConnCB):

    def __init__(self):
        self.connection = None
        self.unexpected_msg = False
        self.connected = False
        super().__init__()

    def add_connection(self, conn):
        self.connection = conn

    def send_message(self, message):
        self.connection.send_message(message)

    def bad_message(self, message):
        self.unexpected_msg = True
        self.log.info("should not have received message: %s" %
                      message.command)

    def on_open(self, conn):
        self.connected = True

    def on_version(self, conn, message): self.bad_message(message)
    def on_verack(self, conn, message): self.bad_message(message)
    def on_reject(self, conn, message): self.bad_message(message)
    def on_inv(self, conn, message): self.bad_message(message)
    def on_addr(self, conn, message): self.bad_message(message)
    def on_alert(self, conn, message): self.bad_message(message)
    def on_getdata(self, conn, message): self.bad_message(message)
    def on_getblocks(self, conn, message): self.bad_message(message)
    def on_tx(self, conn, message): self.bad_message(message)
    def on_block(self, conn, message): self.bad_message(message)
    def on_getaddr(self, conn, message): self.bad_message(message)
    def on_headers(self, conn, message): self.bad_message(message)
    def on_getheaders(self, conn, message): self.bad_message(message)
    def on_ping(self, conn, message): self.bad_message(message)
    def on_mempool(self, conn, message): self.bad_message(message)
    def on_pong(self, conn, message): self.bad_message(message)
    def on_feefilter(self, conn, message): self.bad_message(message)
    def on_sendheaders(self, conn, message): self.bad_message(message)
    def on_sendcmpct(self, conn, message): self.bad_message(message)
    def on_cmpctblock(self, conn, message): self.bad_message(message)
    def on_getblocktxn(self, conn, message): self.bad_message(message)
    def on_blocktxn(self, conn, message): self.bad_message(message)


# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):

    def __init__(self):
        super().__init__()

    # send a bunch of veracks without sending a message. This should get us disconnected.
    # NOTE: implementation-specific check here. 
Remove if bitcoind ban # behavior changes def on_open(self, conn): super().on_open(conn) for i in range(banscore): self.send_message(msg_verack()) def on_reject(self, conn, message): pass # Node that never sends a version. This one just sits idle and hopes to receive # any message (it shouldn't!) class CNodeNoVersionIdle(CLazyNode): def __init__(self): super().__init__() # Node that sends a version but not a verack. class CNodeNoVerackIdle(CLazyNode): def __init__(self): self.version_received = False super().__init__() def on_reject(self, conn, message): pass def on_verack(self, conn, message): pass # When version is received, don't reply with a verack. Instead, see if the # node will give us a message that it shouldn't. This is not an exhaustive # list! def on_version(self, conn, message): self.version_received = True conn.send_message(msg_ping()) conn.send_message(msg_getaddr()) class P2PLeakTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 - - def setup_network(self): - extra_args = [['-banscore=' + str(banscore)] - for i in range(self.num_nodes)] - self.nodes = start_nodes( - self.num_nodes, self.options.tmpdir, extra_args) + self.extra_args = [['-banscore=' + str(banscore)]] def run_test(self): no_version_bannode = CNodeNoVersionBan() no_version_idlenode = CNodeNoVersionIdle() no_verack_idlenode = CNodeNoVerackIdle() connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False)) connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False)) connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode)) no_version_bannode.add_connection(connections[0]) no_version_idlenode.add_connection(connections[1]) no_verack_idlenode.add_connection(connections[2]) NetworkThread().start() # Start up network handling in another thread assert( wait_until(lambda: no_version_bannode.connected and no_version_idlenode.connected and no_verack_idlenode.version_received, timeout=10)) # Mine a block and make sure that it's not sent to the connected nodes self.nodes[0].generate(1) # Give the node enough time to possibly leak out a message time.sleep(5) # This node should have been banned assert(no_version_bannode.connection.state == "closed") [conn.disconnect_node() for conn in connections] # Make sure no unexpected messages came in assert(no_version_bannode.unexpected_msg == False) assert(no_version_idlenode.unexpected_msg == False) assert(no_verack_idlenode.unexpected_msg == False) if __name__ == '__main__': P2PLeakTest().main() diff --git a/qa/rpc-tests/p2p-mempool.py b/qa/rpc-tests/p2p-mempool.py index 87f7503e1..7d7bbd8d2 100755 --- a/qa/rpc-tests/p2p-mempool.py +++ b/qa/rpc-tests/p2p-mempool.py @@ -1,105 +1,100 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
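# Editor's note (a sketch of the behaviour under test, not original to this
# file): the `mempool` p2p message is defined by BIP 35, but BIP 111 ties it
# to the NODE_BLOOM service. A node started with -peerbloomfilters=0 does not
# offer NODE_BLOOM, so a peer that sends `mempool` anyway is considered
# misbehaving and is disconnected -- which is exactly what this test asserts
# via getpeerinfo() below.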
from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class TestNode(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.connection = None self.ping_counter = 1 self.last_pong = msg_pong() self.block_receive_map = {} def add_connection(self, conn): self.connection = conn self.peer_disconnected = False def on_inv(self, conn, message): pass # Track the last getdata message we receive (used in the test) def on_getdata(self, conn, message): self.last_getdata = message def on_block(self, conn, message): message.block.calc_sha256() try: self.block_receive_map[message.block.sha256] += 1 except KeyError as e: self.block_receive_map[message.block.sha256] = 1 # Spin until verack message is received from the node. # We use this to signal that our test can begin. This # is called from the testing thread, so it needs to acquire # the global lock. def wait_for_verack(self): def veracked(): return self.verack_received return wait_until(veracked, timeout=10) def wait_for_disconnect(self): def disconnected(): return self.peer_disconnected return wait_until(disconnected, timeout=10) # Wrapper for the NodeConn's send_message function def send_message(self, message): self.connection.send_message(message) def on_pong(self, conn, message): self.last_pong = message def on_close(self, conn): self.peer_disconnected = True # Sync up with the node after delivery of a block def sync_with_ping(self, timeout=30): def received_pong(): return (self.last_pong.nonce == self.ping_counter) self.connection.send_message(msg_ping(nonce=self.ping_counter)) success = wait_until(received_pong, timeout=timeout) self.ping_counter += 1 return success def send_mempool(self): self.lastInv = [] self.send_message(msg_mempool()) class P2PMempoolTests(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True - self.num_nodes = 2 - - def setup_network(self): - # Start a node with maxuploadtarget of 200 MB (/24h) - self.nodes = [] - self.nodes.append( - start_node(0, self.options.tmpdir, ["-peerbloomfilters=0"])) + self.num_nodes = 1 + self.extra_args = [["-peerbloomfilters=0"]] def run_test(self): # connect a mininode aTestNode = TestNode() node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode) aTestNode.add_connection(node) NetworkThread().start() aTestNode.wait_for_verack() # request mempool aTestNode.send_mempool() aTestNode.wait_for_disconnect() # mininode must be disconnected at this point assert_equal(len(self.nodes[0].getpeerinfo()), 0) if __name__ == '__main__': P2PMempoolTests().main() diff --git a/qa/rpc-tests/p2p-timeouts.py b/qa/rpc-tests/p2p-timeouts.py index 5f8768780..bc80d7abd 100755 --- a/qa/rpc-tests/p2p-timeouts.py +++ b/qa/rpc-tests/p2p-timeouts.py @@ -1,109 +1,103 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ TimeoutsTest -- test various net timeouts (only in extended tests) - Create three bitcoind nodes: no_verack_node - we never send a verack in response to their version no_version_node - we never send a version (only a ping) no_send_node - we never send any P2P message. 
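(Editor's note: the waits below add up to 1 + 30 + 31 = 62 seconds, i.e.
deliberately just past the 60-second version/verack handshake timeout.)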
- Start all three nodes - Wait 1 second - Assert that we're connected - Send a ping to no_verack_node and no_version_node - Wait 30 seconds - Assert that we're still connected - Send a ping to no_verack_node and no_version_node - Wait 31 seconds - Assert that we're no longer connected (timeout to receive version/verack is 60 seconds) """ from time import sleep from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class TestNode(SingleNodeConnCB): def __init__(self): SingleNodeConnCB.__init__(self) self.connected = False self.received_version = False def on_open(self, conn): self.connected = True def on_close(self, conn): self.connected = False def on_version(self, conn, message): # Don't send a verack in response self.received_version = True class TimeoutsTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 - def setup_network(self): - self.nodes = [] - - # Start up node0 to be a version 1, pre-segwit node. - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) - def run_test(self): # Setup the p2p connections and start up the network thread. self.no_verack_node = TestNode() # never send verack self.no_version_node = TestNode() # never send version (just ping) self.no_send_node = TestNode() # never send anything connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_verack_node)) connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_version_node, send_version=False)) connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.no_send_node, send_version=False)) self.no_verack_node.add_connection(connections[0]) self.no_version_node.add_connection(connections[1]) self.no_send_node.add_connection(connections[2]) NetworkThread().start() # Start up network handling in another thread sleep(1) assert(self.no_verack_node.connected) assert(self.no_version_node.connected) assert(self.no_send_node.connected) ping_msg = msg_ping() connections[0].send_message(ping_msg) connections[1].send_message(ping_msg) sleep(30) assert(self.no_verack_node.received_version) assert(self.no_verack_node.connected) assert(self.no_version_node.connected) assert(self.no_send_node.connected) connections[0].send_message(ping_msg) connections[1].send_message(ping_msg) sleep(31) assert(not self.no_verack_node.connected) assert(not self.no_version_node.connected) assert(not self.no_send_node.connected) if __name__ == '__main__': TimeoutsTest().main() diff --git a/qa/rpc-tests/p2p-versionbits-warning.py b/qa/rpc-tests/p2p-versionbits-warning.py index 93a392d54..3fd2e3f11 100755 --- a/qa/rpc-tests/p2p-versionbits-warning.py +++ b/qa/rpc-tests/p2p-versionbits-warning.py @@ -1,180 +1,179 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import re import time from test_framework.blocktools import create_block, create_coinbase ''' Test version bits' warning system. Generate chains with block versions that appear to be signalling unknown soft-forks, and test that warning alerts are generated. 
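(Editor's note, summarizing the regtest parameters used below: a versionbits
signalling period is VB_PERIOD = 144 blocks and the activation threshold is
VB_THRESHOLD = 108 of 144, i.e. 75%. Mining VB_THRESHOLD - 1 = 107 signalling
blocks in a period should therefore trigger no warning, while a period with
VB_THRESHOLD or more signalling blocks moves the unknown deployment towards
LOCKED_IN/ACTIVE and does.)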
''' VB_PERIOD = 144 # versionbits period length for regtest VB_THRESHOLD = 108 # versionbits activation threshold for regtest VB_TOP_BITS = 0x20000000 VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect" WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format( VB_UNKNOWN_BIT) VB_PATTERN = re.compile("^Warning.*versionbit") # TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending # p2p messages to a node, generating the messages in the main testing logic. class TestNode(NodeConnCB): def __init__(self): NodeConnCB.__init__(self) self.connection = None self.ping_counter = 1 self.last_pong = msg_pong() def add_connection(self, conn): self.connection = conn def on_inv(self, conn, message): pass # Wrapper for the NodeConn's send_message function def send_message(self, message): self.connection.send_message(message) def on_pong(self, conn, message): self.last_pong = message # Sync up with the node after delivery of a block def sync_with_ping(self, timeout=30): self.connection.send_message(msg_ping(nonce=self.ping_counter)) received_pong = False sleep_time = 0.05 while not received_pong and timeout > 0: time.sleep(sleep_time) timeout -= sleep_time with mininode_lock: if self.last_pong.nonce == self.ping_counter: received_pong = True self.ping_counter += 1 return received_pong class VersionBitsWarningTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 def setup_network(self): self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt") # Open and close to create zero-length file with open(self.alert_filename, 'w', encoding='utf8') as _: pass self.extra_args = [ ["-alertnotify=echo %s >> \"" + self.alert_filename + "\""]] - self.nodes = start_nodes( - self.num_nodes, self.options.tmpdir, self.extra_args) + self.setup_nodes() # Send numblocks blocks via peer with nVersionToUse set. def send_blocks_with_version(self, peer, numblocks, nVersionToUse): tip = self.nodes[0].getbestblockhash() height = self.nodes[0].getblockcount() block_time = self.nodes[0].getblockheader(tip)["time"] + 1 tip = int(tip, 16) for _ in range(numblocks): block = create_block(tip, create_coinbase(height + 1), block_time) block.nVersion = nVersionToUse block.solve() peer.send_message(msg_block(block)) block_time += 1 height += 1 tip = block.sha256 peer.sync_with_ping() def test_versionbits_in_alert_file(self): with open(self.alert_filename, 'r', encoding='utf8') as f: alert_text = f.read() assert(VB_PATTERN.match(alert_text)) def run_test(self): # Setup the p2p connection and start up the network thread. test_node = TestNode() connections = [] connections.append( NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)) test_node.add_connection(connections[0]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here test_node.wait_for_verack() # 1. Have the node mine one period worth of blocks self.nodes[0].generate(VB_PERIOD) # 2. Now build one period of blocks on the tip, with < VB_THRESHOLD # blocks signaling some unknown bit. 
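        # (Editor's worked example) With VB_TOP_BITS = 0x20000000 and
        # VB_UNKNOWN_BIT = 27, the version computed below is
        #   0x20000000 | (1 << 27) = 0x20000000 | 0x08000000 = 0x28000000,
        # i.e. valid versionbits signalling (top bits 001) with one bit set
        # that is not assigned to any known deployment.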
nVersion = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT) self.send_blocks_with_version(test_node, VB_THRESHOLD - 1, nVersion) # Fill rest of period with regular version blocks self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1) # Check that we're not getting any versionbit-related errors in # get*info() assert(not VB_PATTERN.match(self.nodes[0].getinfo()["errors"])) assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["errors"])) assert(not VB_PATTERN.match( self.nodes[0].getnetworkinfo()["warnings"])) # 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling # some unknown bit self.send_blocks_with_version(test_node, VB_THRESHOLD, nVersion) self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD) # Might not get a versionbits-related alert yet, as we should # have gotten a different alert due to more than 51/100 blocks # being of unexpected version. # Check that get*info() shows some kind of error. assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getinfo()["errors"]) assert(WARN_UNKNOWN_RULES_MINED in self.nodes[ 0].getmininginfo()["errors"]) assert(WARN_UNKNOWN_RULES_MINED in self.nodes[ 0].getnetworkinfo()["warnings"]) # Mine a period worth of expected blocks so the generic block-version warning # is cleared, and restart the node. This should move the versionbit state # to ACTIVE. self.nodes[0].generate(VB_PERIOD) stop_nodes(self.nodes) # Empty out the alert file with open(self.alert_filename, 'w', encoding='utf8') as _: pass self.nodes = start_nodes( self.num_nodes, self.options.tmpdir, self.extra_args) # Connecting one block should be enough to generate an error. self.nodes[0].generate(1) assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getinfo()["errors"]) assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[ 0].getmininginfo()["errors"]) assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[ 0].getnetworkinfo()["warnings"]) stop_nodes(self.nodes) self.test_versionbits_in_alert_file() # Test framework expects the node to still be running... self.nodes = start_nodes( self.num_nodes, self.options.tmpdir, self.extra_args) if __name__ == '__main__': VersionBitsWarningTest().main() diff --git a/qa/rpc-tests/preciousblock.py b/qa/rpc-tests/preciousblock.py index 0ca5dc2a3..7a66aae37 100755 --- a/qa/rpc-tests/preciousblock.py +++ b/qa/rpc-tests/preciousblock.py @@ -1,127 +1,127 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
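# Editor's note (sketch): `preciousblock <hash>` asks the node to treat the
# given block as if it had been received before any competing block of equal
# work. It can flip which of two same-work tips is active, but it does not
# override the most-work rule -- which is why block C stops being selectable
# once the H fork has strictly more work later in this test.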
# # Test PreciousBlock code # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, connect_nodes_bi, sync_chain, sync_blocks, ) def unidirectional_node_sync_via_rpc(node_src, node_dest): blocks_to_copy = [] blockhash = node_src.getbestblockhash() while True: try: assert(len(node_dest.getblock(blockhash, False)) > 0) break except: blocks_to_copy.append(blockhash) blockhash = node_src.getblockheader( blockhash, True)['previousblockhash'] blocks_to_copy.reverse() for blockhash in blocks_to_copy: blockdata = node_src.getblock(blockhash, False) assert(node_dest.submitblock(blockdata) in (None, 'inconclusive')) def node_sync_via_rpc(nodes): for node_src in nodes: for node_dest in nodes: if node_src is node_dest: continue unidirectional_node_sync_via_rpc(node_src, node_dest) class PreciousTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 3 def setup_network(self): - self.nodes = self.setup_nodes() + self.setup_nodes() def run_test(self): self.log.info( "Ensure submitblock can in principle reorg to a competing chain") self.nodes[0].generate(1) assert_equal(self.nodes[0].getblockcount(), 1) (hashY, hashZ) = self.nodes[1].generate(2) assert_equal(self.nodes[1].getblockcount(), 2) node_sync_via_rpc(self.nodes[0:3]) assert_equal(self.nodes[0].getbestblockhash(), hashZ) self.log.info("Mine blocks A-B-C on Node 0") (hashA, hashB, hashC) = self.nodes[0].generate(3) assert_equal(self.nodes[0].getblockcount(), 5) self.log.info("Mine competing blocks E-F-G on Node 1") (hashE, hashF, hashG) = self.nodes[1].generate(3) assert_equal(self.nodes[1].getblockcount(), 5) assert(hashC != hashG) self.log.info("Connect nodes and check no reorg occurs") # Submit competing blocks via RPC so any reorg should occur before we # proceed (no way to wait on inaction for p2p sync) node_sync_via_rpc(self.nodes[0:2]) connect_nodes_bi(self.nodes, 0, 1) assert_equal(self.nodes[0].getbestblockhash(), hashC) assert_equal(self.nodes[1].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block G") self.nodes[0].preciousblock(hashG) assert_equal(self.nodes[0].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block C again") self.nodes[0].preciousblock(hashC) assert_equal(self.nodes[0].getbestblockhash(), hashC) self.log.info("Make Node1 prefer block C") self.nodes[1].preciousblock(hashC) sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC assert_equal(self.nodes[1].getbestblockhash(), hashC) self.log.info("Make Node1 prefer block G again") self.nodes[1].preciousblock(hashG) assert_equal(self.nodes[1].getbestblockhash(), hashG) self.log.info("Make Node0 prefer block G again") self.nodes[0].preciousblock(hashG) assert_equal(self.nodes[0].getbestblockhash(), hashG) self.log.info("Make Node1 prefer block C again") self.nodes[1].preciousblock(hashC) assert_equal(self.nodes[1].getbestblockhash(), hashC) self.log.info( "Mine another block (E-F-G-)H on Node 0 and reorg Node 1") self.nodes[0].generate(1) assert_equal(self.nodes[0].getblockcount(), 6) sync_blocks(self.nodes[0:2]) hashH = self.nodes[0].getbestblockhash() assert_equal(self.nodes[1].getbestblockhash(), hashH) self.log.info("Node1 should not be able to prefer block C anymore") self.nodes[1].preciousblock(hashC) assert_equal(self.nodes[1].getbestblockhash(), hashH) self.log.info("Mine competing blocks I-J-K-L on Node 2") self.nodes[2].generate(4) assert_equal(self.nodes[2].getblockcount(), 6) hashL = 
self.nodes[2].getbestblockhash() self.log.info("Connect nodes and check no reorg occurs") node_sync_via_rpc(self.nodes[1:3]) connect_nodes_bi(self.nodes, 1, 2) connect_nodes_bi(self.nodes, 0, 2) assert_equal(self.nodes[0].getbestblockhash(), hashH) assert_equal(self.nodes[1].getbestblockhash(), hashH) assert_equal(self.nodes[2].getbestblockhash(), hashL) self.log.info("Make Node1 prefer block L") self.nodes[1].preciousblock(hashL) assert_equal(self.nodes[1].getbestblockhash(), hashL) self.log.info("Make Node2 prefer block H") self.nodes[2].preciousblock(hashH) assert_equal(self.nodes[2].getbestblockhash(), hashH) if __name__ == '__main__': PreciousTest().main() diff --git a/qa/rpc-tests/prioritise_transaction.py b/qa/rpc-tests/prioritise_transaction.py index 6127a434d..f5443ff97 100755 --- a/qa/rpc-tests/prioritise_transaction.py +++ b/qa/rpc-tests/prioritise_transaction.py @@ -1,160 +1,154 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test PrioritiseTransaction code # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * # FIXME: review how this test needs to be adapted w.r.t _LEGACY_MAX_BLOCK_SIZE from test_framework.mininode import COIN from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE class PrioritiseTransactionTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 + self.extra_args = [["-printpriority=1"]] + def run_test(self): self.txouts = gen_return_txouts() - - def setup_network(self): - self.nodes = [] - self.is_network_split = False - - self.nodes.append( - start_node(0, self.options.tmpdir, ["-printpriority=1"])) self.relayfee = self.nodes[0].getnetworkinfo()['relayfee'] - def run_test(self): utxo_count = 90 utxos = create_confirmed_utxos( self.relayfee, self.nodes[0], utxo_count) # our transactions are smaller than 100kb base_fee = self.relayfee * 100 txids = [] # Create 3 batches of transactions at 3 different fee rate levels range_size = utxo_count // 3 for i in range(3): txids.append([]) start_range = i * range_size end_range = start_range + range_size txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[ start_range:end_range], end_range - start_range, (i + 1) * base_fee) # Make sure that the size of each group of transactions exceeds # LEGACY_MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create # more transactions. mempool = self.nodes[0].getrawmempool(True) sizes = [0, 0, 0] for i in range(3): for j in txids[i]: assert(j in mempool) sizes[i] += mempool[j]['size'] # Fail => raise utxo_count assert(sizes[i] > LEGACY_MAX_BLOCK_SIZE) # add a fee delta to something in the cheapest bucket and make sure it gets mined # also check that a different entry in the cheapest bucket is NOT mined (lower # the priority to ensure its not mined due to priority) self.nodes[0].prioritisetransaction( txids[0][0], 0, int(3 * base_fee * COIN)) self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0) self.nodes[0].generate(1) mempool = self.nodes[0].getrawmempool() self.log.info("Assert that prioritised transaction was mined") assert(txids[0][0] not in mempool) assert(txids[0][1] in mempool) high_fee_tx = None for x in txids[2]: if x not in mempool: high_fee_tx = x # Something high-fee should have been mined! 
assert(high_fee_tx != None) # Add a prioritisation before a tx is in the mempool (de-prioritising a # high-fee transaction so that it's now low fee). self.nodes[0].prioritisetransaction( high_fee_tx, -1e15, -int(2 * base_fee * COIN)) # Add everything back to mempool self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash()) # Check to make sure our high fee rate tx is back in the mempool mempool = self.nodes[0].getrawmempool() assert(high_fee_tx in mempool) # Now verify the modified-high feerate transaction isn't mined before # the other high fee transactions. Keep mining until our mempool has # decreased by all the high fee size that we calculated above. while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]): self.nodes[0].generate(1) # High fee transaction should not have been mined, but other high fee rate # transactions should have been. mempool = self.nodes[0].getrawmempool() self.log.info( "Assert that de-prioritised transaction is still in mempool") assert(high_fee_tx in mempool) for x in txids[2]: if (x != high_fee_tx): assert(x not in mempool) # Create a free, low priority transaction. Should be rejected. utxo_list = self.nodes[0].listunspent() assert(len(utxo_list) > 0) utxo = utxo_list[0] inputs = [] outputs = {} inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]}) outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) tx_hex = self.nodes[0].signrawtransaction( raw_tx, None, None, "ALL|FORKID")["hex"] txid = self.nodes[0].sendrawtransaction(tx_hex) # A tx that spends an in-mempool tx has 0 priority, so we can use it to # test the effect of using prioritise transaction for mempool # acceptance inputs = [] inputs.append({"txid": txid, "vout": 0}) outputs = {} outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs) tx2_hex = self.nodes[0].signrawtransaction( raw_tx2, None, None, "ALL|FORKID")["hex"] tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"] try: self.nodes[0].sendrawtransaction(tx2_hex) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee assert(tx2_id not in self.nodes[0].getrawmempool()) else: assert(False) # This is a less than 1000-byte transaction, so just set the fee # to be the minimum for a 1000 byte transaction and check that it is # accepted. self.nodes[0].prioritisetransaction( tx2_id, 0, int(self.relayfee * COIN)) self.log.info( "Assert that prioritised free transaction is accepted to mempool") assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id) assert(tx2_id in self.nodes[0].getrawmempool()) if __name__ == '__main__': PrioritiseTransactionTest().main() diff --git a/qa/rpc-tests/proxy_test.py b/qa/rpc-tests/proxy_test.py index f663743e9..e321b0958 100755 --- a/qa/rpc-tests/proxy_test.py +++ b/qa/rpc-tests/proxy_test.py @@ -1,218 +1,219 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
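# Editor's note (a sketch of the port arithmetic below; PORT_MIN = 11000 and
# PORT_RANGE = 5000 are the framework defaults assumed here):
#   RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE = 21000  (clear of p2p/rpc ports)
#   conf1 proxy : 21000 + (pid % 1000)
#   conf2 proxy : 22000 + (pid % 1000)
#   conf3 proxy : 23000 + (pid % 1000)  (IPv6, only if available)
# Deriving ports from the pid keeps parallel test runs from colliding.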
import socket
import os

from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
    PORT_MIN,
    PORT_RANGE,
    start_nodes,
    assert_equal,
)
from test_framework.netutil import test_ipv6_local

'''
Test plan:
- Start bitcoinds with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
    - `-proxy` (proxy everything)
    - `-onion` (proxy just onions)
    - `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side:
    - support no authentication (other proxy)
    - support no authentication + user/pass authentication (Tor)
    - proxy on IPv6

- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects

addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''

RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE  # Start after p2p and rpc ports


class ProxyTest(BitcoinTestFramework):

    def __init__(self):
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False

    def setup_nodes(self):
        self.have_ipv6 = test_ipv6_local()
        # Create two proxies on different ports
        # ... one unauthenticated
        self.conf1 = Socks5Configuration()
        self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
        self.conf1.unauth = True
        self.conf1.auth = False
        # ... one supporting authenticated and unauthenticated (Tor)
        self.conf2 = Socks5Configuration()
        self.conf2.addr = (
            '127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
        self.conf2.unauth = True
        self.conf2.auth = True

        if self.have_ipv6:
            # ... 
one on IPv6 with similar configuration self.conf3 = Socks5Configuration() self.conf3.af = socket.AF_INET6 self.conf3.addr = ( '::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000)) self.conf3.unauth = True self.conf3.auth = True else: self.log.info("Warning: testing without local IPv6 support") self.serv1 = Socks5Server(self.conf1) self.serv1.start() self.serv2 = Socks5Server(self.conf2) self.serv2.start() if self.have_ipv6: self.serv3 = Socks5Server(self.conf3) self.serv3.start() # Note: proxies are not used to connect to local nodes # this is because the proxy to use is based on CService.GetNetwork(), # which return NET_UNROUTABLE for localhost args = [ ['-listen', '-proxy=%s:%i' % (self.conf1.addr), '-proxyrandomize=1'], ['-listen', '-proxy=%s:%i' % (self.conf1.addr), '-onion=%s:%i' % (self.conf2.addr), '-proxyrandomize=0'], ['-listen', '-proxy=%s:%i' % (self.conf2.addr), '-proxyrandomize=1'], [] ] if self.have_ipv6: args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr), '-proxyrandomize=0', '-noonion'] - return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args) + self.nodes = start_nodes( + self.num_nodes, self.options.tmpdir, extra_args=args) def node_test(self, node, proxies, auth, test_onion=True): rv = [] # Test: outgoing IPv4 connection through node node.addnode("15.61.23.23:1234", "onetry") cmd = proxies[0].queue.get() assert(isinstance(cmd, Socks5Command)) # Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, # even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"15.61.23.23") assert_equal(cmd.port, 1234) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if self.have_ipv6: # Test: outgoing IPv6 connection through node node.addnode( "[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry") cmd = proxies[1].queue.get() assert(isinstance(cmd, Socks5Command)) # Note: bitcoind's SOCKS5 implementation only sends atyp # DOMAINNAME, even if connecting directly to IPv4/IPv6 assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534") assert_equal(cmd.port, 5443) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) if test_onion: # Test: outgoing onion connection through node node.addnode("bitcoinostk4e4re.onion:8333", "onetry") cmd = proxies[2].queue.get() assert(isinstance(cmd, Socks5Command)) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"bitcoinostk4e4re.onion") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) # Test: outgoing DNS name connection through node node.addnode("node.noumenon:8333", "onetry") cmd = proxies[3].queue.get() assert(isinstance(cmd, Socks5Command)) assert_equal(cmd.atyp, AddressType.DOMAINNAME) assert_equal(cmd.addr, b"node.noumenon") assert_equal(cmd.port, 8333) if not auth: assert_equal(cmd.username, None) assert_equal(cmd.password, None) rv.append(cmd) return rv def run_test(self): # basic -proxy self.node_test( self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False) # -proxy plus -onion self.node_test( self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False) # -proxy plus -onion, -proxyrandomize rv = self.node_test( self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True) # Check that credentials as used for -proxyrandomize connections are # unique credentials = set((x.username, x.password) 
for x in rv) assert_equal(len(credentials), len(rv)) if self.have_ipv6: # proxy on IPv6 localhost self.node_test( self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False) def networks_dict(d): r = {} for x in d['networks']: r[x['name']] = x return r # test RPC getnetworkinfo n0 = networks_dict(self.nodes[0].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr)) assert_equal(n0[net]['proxy_randomize_credentials'], True) assert_equal(n0['onion']['reachable'], True) n1 = networks_dict(self.nodes[1].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr)) assert_equal(n1[net]['proxy_randomize_credentials'], False) assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr)) assert_equal(n1['onion']['proxy_randomize_credentials'], False) assert_equal(n1['onion']['reachable'], True) n2 = networks_dict(self.nodes[2].getnetworkinfo()) for net in ['ipv4', 'ipv6', 'onion']: assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr)) assert_equal(n2[net]['proxy_randomize_credentials'], True) assert_equal(n2['onion']['reachable'], True) if self.have_ipv6: n3 = networks_dict(self.nodes[3].getnetworkinfo()) for net in ['ipv4', 'ipv6']: assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr)) assert_equal(n3[net]['proxy_randomize_credentials'], False) assert_equal(n3['onion']['reachable'], False) if __name__ == '__main__': ProxyTest().main() diff --git a/qa/rpc-tests/pruning.py b/qa/rpc-tests/pruning.py index edd10ef8e..9ae77752a 100755 --- a/qa/rpc-tests/pruning.py +++ b/qa/rpc-tests/pruning.py @@ -1,532 +1,513 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test pruning code # ******** # WARNING: # This test uses 4GB of disk space. # This test takes 30 mins or more (up to 2 hours) # ******** from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.outputchecker import OutputChecker import time import os MIN_BLOCKS_TO_KEEP = 288 # Rescans start at the earliest block up to 2 hours before a key timestamp, so # the manual prune RPC avoids pruning blocks in the same window to be # compatible with pruning based on key creation time. RESCAN_WINDOW = 2 * 60 * 60 def calc_usage(blockdir): return sum(os.path.getsize(blockdir + f) for f in os.listdir(blockdir) if os.path.isfile(blockdir + f)) / (1024. * 1024.) 
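# Editor's sketch: calc_usage sums the sizes of the files directly inside the
# blocks directory (blk*.dat block files plus rev*.dat undo files) and reports
# MiB. For example, a hypothetical directory with two 128 MiB block files and
# one 10 MiB undo file would report (2 * 128 + 10) = 266.0, which the test
# then compares against the -prune=550 target.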
class PruneTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 6 - # Cache for utxos, as the listunspent may take a long time later in the - # test - self.utxo_cache_0 = [] - self.utxo_cache_1 = [] - - def setup_network(self): - self.nodes = [] - self.is_network_split = False - # Create nodes 0 and 1 to mine - self.nodes.append( - start_node(0, self.options.tmpdir, ["-maxreceivebuffer=20000", - "-checkblocks=5", - "-blockmaxsize=1000000"], - timewait=900)) - self.nodes.append( - start_node(1, self.options.tmpdir, ["-maxreceivebuffer=20000", - "-checkblocks=5", - "-blockmaxsize=1000000"], - timewait=900)) - # Create node 2 to test pruning - self.nodes.append( - start_node(2, self.options.tmpdir, ["-maxreceivebuffer=20000", - "-prune=550", - "-blockmaxsize=1000000"], - timewait=900)) - self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/" - # Create nodes 3 and 4 to test manual pruning (they will be re-started # with manual pruning later) - self.nodes.append( - start_node(3, self.options.tmpdir, ["-maxreceivebuffer=20000", - "-blockmaxsize=1000000"], - timewait=900)) - self.nodes.append( - start_node(4, self.options.tmpdir, ["-maxreceivebuffer=20000", - "-blockmaxsize=1000000"], - timewait=900)) - # Create nodes 5 to test wallet in prune mode, but do not connect - self.nodes.append( - start_node(5, self.options.tmpdir, ["-prune=550", - "-blockmaxsize=1000000"])) + self.extra_args = [ + ["-maxreceivebuffer=20000", + "-blockmaxsize=999000", "-checkblocks=5"], + ["-maxreceivebuffer=20000", + "-blockmaxsize=999000", "-checkblocks=5"], + ["-maxreceivebuffer=20000", "-prune=550"], + ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], + ["-maxreceivebuffer=20000", "-blockmaxsize=999000"], + ["-prune=550"]] - # Determine default relay fee - self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"] + def setup_network(self): + self.setup_nodes() + + self.prunedir = self.options.tmpdir + "/node2/regtest/blocks/" connect_nodes(self.nodes[0], 1) connect_nodes(self.nodes[1], 2) connect_nodes(self.nodes[2], 0) connect_nodes(self.nodes[0], 3) connect_nodes(self.nodes[0], 4) sync_blocks(self.nodes[0:5]) def create_big_chain(self): # Start by creating some coinbases we can spend later self.nodes[1].generate(200) sync_blocks(self.nodes[0:2]) self.nodes[0].generate(150) # Then mine enough full blocks to create more than 550MiB of data for i in range(645): mine_large_block(self.nodes[0], self.utxo_cache_0) sync_blocks(self.nodes[0:5]) def test_height_min(self): if not os.path.isfile(self.prunedir + "blk00000.dat"): raise AssertionError("blk00000.dat is missing, pruning too early") self.log.info("Success") self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir)) self.log.info( "Mining 25 more blocks should cause the first block file to be pruned") # Pruning doesn't run until we're allocating another chunk, 20 full # blocks past the height cutoff will ensure this for i in range(25): mine_large_block(self.nodes[0], self.utxo_cache_0) waitstart = time.time() while os.path.isfile(self.prunedir + "blk00000.dat"): time.sleep(0.1) if time.time() - waitstart > 30: raise AssertionError( "blk00000.dat not pruned when it should be") self.log.info("Success") usage = calc_usage(self.prunedir) self.log.info("Usage should be below target: %d" % usage) if (usage > 550): raise AssertionError("Pruning target not being met") def create_chain_with_staleblocks(self): # Create stale blocks in manageable sized 
chunks
        self.log.info(
            "Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
        for j in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and
            # then reorgs when node0 reconnects
            # Stopping node 0 also clears its mempool, so it doesn't have
            # node1's transactions to accidentally mine
            self.stop_node(0)
            self.nodes[0] = start_node(
                0, self.options.tmpdir, ["-maxreceivebuffer=20000",
                                         "-checkblocks=5",
                                         "-blockmaxsize=1000000"],
                timewait=900)
            # Mine 24 blocks in node 1
            for i in range(24):
                if j == 0:
                    mine_large_block(self.nodes[1], self.utxo_cache_1)
                else:
                    # tx's already in mempool from previous disconnects
                    self.nodes[1].generate(1)

            # Reorg back with 25 block chain from node 0
            for i in range(25):
                mine_large_block(self.nodes[0], self.utxo_cache_0)

            # Create connections in this order so both nodes can see the reorg
            # at the same time
            connect_nodes(self.nodes[1], 0)
            connect_nodes(self.nodes[2], 0)
            sync_blocks(self.nodes[0:3])

        self.log.info("Usage can be over target because of high stale rate: %d" %
                      calc_usage(self.prunedir))

    def reorg_test(self):
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node
        # 0 and Node 2's tip. This will cause Node 2 to do a reorg requiring
        # 288 blocks of undo data to the reorg_test chain. Reboot node 1 to
        # clear its mempool (hopefully make the invalidate faster). Lower the
        # block max size so we don't keep mining all our big mempool
        # transactions (from disconnected blocks)
        self.stop_node(1)
        self.nodes[1] = start_node(
            1, self.options.tmpdir, ["-maxreceivebuffer=20000",
                                     "-blockmaxsize=5000", "-checkblocks=5",
                                     "-disablesafemode"],
            timewait=900, stderr_checker=OutputChecker())
        height = self.nodes[1].getblockcount()
        self.log.info("Current block height: %d" % height)

        invalidheight = height - 287
        badhash = self.nodes[1].getblockhash(invalidheight)
        self.log.info("Invalidating block %s at height %d" %
                      (badhash, invalidheight))
        self.nodes[1].invalidateblock(badhash)

        # We've now switched to our previously mined 24-block fork on node 1,
        # but that's not what we want.
        # So invalidate that fork as well, until we're on the same chain as
        # node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(invalidheight - 1)
        curhash = self.nodes[1].getblockhash(invalidheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(invalidheight - 1)

        assert(self.nodes[1].getblockcount() == invalidheight - 1)
        self.log.info("New best height: %d" % self.nodes[1].getblockcount())

        # Reboot node1 to clear those giant tx's from mempool
        self.stop_node(1)
        self.nodes[1] = start_node(
            1, self.options.tmpdir, ["-maxreceivebuffer=20000",
                                     "-blockmaxsize=5000", "-checkblocks=5",
                                     "-disablesafemode",
                                     "-blockmaxsize=1000000"],
            timewait=900, stderr_checker=OutputChecker())

        self.log.info("Generating new longer chain of 300 more blocks")
        self.nodes[1].generate(300)

        self.log.info("Reconnect nodes")
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[2], 1)
        sync_blocks(self.nodes[0:3], timeout=120)

        self.log.info("Verify height on node 2: %d" %
                      self.nodes[2].getblockcount())
        self.log.info("Usage possibly still high because of stale blocks in block files: %d" %
                      calc_usage(self.prunedir))

        self.log.info(
            "Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
        for i in range(22):
            # This can be slow, so do this in multiple RPC calls to avoid
            # RPC timeouts.
            # node 0 has many large tx's in its mempool from the disconnects
            self.nodes[0].generate(10)
        sync_blocks(self.nodes[0:3], timeout=300)

        usage = calc_usage(self.prunedir)
        self.log.info("Usage should be below target: %d" % usage)
        if (usage > 550):
            raise AssertionError("Pruning target not being met")

        return invalidheight, badhash

    def reorg_back(self):
        # Verify that a block on the old main chain fork has been pruned away
        assert_raises_jsonrpc(
            -1, "Block not available (pruned data)",
            self.nodes[2].getblock, self.forkhash)
        self.log.info("Will need to redownload block %d" % self.forkheight)

        # Verify that we have enough history to reorg back to the fork point.
        # Although this is more than 288 blocks, because this chain was
        # written more recently and only its other 299 small and 220 large
        # blocks are in the block files after it, it's expected to still be
        # retained.
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))

        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2

        # As of 0.10 the current block download logic is not able to reorg to
        # the original chain created in create_chain_with_stale_blocks because
        # it doesn't know of any peer that's on that chain from which to
        # redownload its missing blocks. Invalidate the reorg_test chain in
        # node 0 as well; it can successfully switch to the original chain
        # because it has all the block data. However it must mine enough
        # blocks to have a more-work chain than the reorg_test chain in order
        # to trigger node 2's block download logic. At this point node 2 is
        # within 288 blocks of the fork point so it will preserve its ability
        # to reorg.
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            self.log.info(
                "Rewind node 0 to prev main chain to mine longer chain to trigger redownload. 
Blocks needed: %d" % blocks_to_mine) self.nodes[0].invalidateblock(curchainhash) assert(self.nodes[0].getblockcount() == self.mainchainheight) assert(self.nodes[0].getbestblockhash() == self.mainchainhash2) goalbesthash = self.nodes[0].generate(blocks_to_mine)[-1] goalbestheight = first_reorg_height + 1 self.log.info( "Verify node 2 reorged back to the main chain, some blocks of which it had to redownload") waitstart = time.time() while self.nodes[2].getblockcount() < goalbestheight: time.sleep(0.1) if time.time() - waitstart > 900: raise AssertionError("Node 2 didn't reorg to proper height") assert(self.nodes[2].getbestblockhash() == goalbesthash) # Verify we can now have the data for a block previously pruned assert(self.nodes[2].getblock( self.forkhash)["height"] == self.forkheight) def manual_test(self, node_number, use_timestamp): # at this point, node has 995 blocks and has not yet run in prune mode node = self.nodes[node_number] = start_node( node_number, self.options.tmpdir, ["-blockmaxsize=1000000"], timewait=900) assert_equal(node.getblockcount(), 995) assert_raises_jsonrpc( -1, "not in prune mode", node.pruneblockchain, 500) self.stop_node(node_number) # now re-start in manual pruning mode node = self.nodes[node_number] = start_node( node_number, self.options.tmpdir, ["-prune=1", "-blockmaxsize=1000000"], timewait=900) assert_equal(node.getblockcount(), 995) def height(index): if use_timestamp: return node.getblockheader(node.getblockhash(index))["time"] + RESCAN_WINDOW else: return index def prune(index, expected_ret=None): ret = node.pruneblockchain(height(index)) # Check the return value. When use_timestamp is True, just check # that the return value is less than or equal to the expected # value, because when more than one block is generated per second, # a timestamp will not be granular enough to uniquely identify an # individual block. if expected_ret is None: expected_ret = index if use_timestamp: assert_greater_than(ret, 0) assert_greater_than(expected_ret + 1, ret) else: assert_equal(ret, expected_ret) def has_block(index): return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index)) # should not prune because chain tip of node 3 (995) < PruneAfterHeight # (1000) assert_raises_jsonrpc( -1, "Blockchain is too short for pruning", node.pruneblockchain, height(500)) # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight) node.generate(6) assert_equal(node.getblockchaininfo()["blocks"], 1001) # negative heights should raise an exception assert_raises_jsonrpc(-8, "Negative", node.pruneblockchain, -10) # height=100 too low to prune first block file so this is a no-op prune(100) if not has_block(0): raise AssertionError( "blk00000.dat is missing when should still be there") # Does nothing node.pruneblockchain(height(0)) if not has_block(0): raise AssertionError( "blk00000.dat is missing when should still be there") # height=500 should prune first file prune(500) if has_block(0): raise AssertionError( "blk00000.dat is still there, should be pruned by now") if not has_block(1): raise AssertionError( "blk00001.dat is missing when should still be there") # height=650 should prune second file prune(650) if has_block(1): raise AssertionError( "blk00001.dat is still there, should be pruned by now") # height=1000 should not prune anything more, because tip-288 is in # blk00002.dat. 
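        # (Editor's worked example) The tip is at height 1001 here and
        # MIN_BLOCKS_TO_KEEP = 288, so the node will not prune past height
        # 1001 - 288 = 713 -- the expected return value passed to prune()
        # below, and the reason blk00002.dat has to survive this call.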
        prune(1000, 1001 - MIN_BLOCKS_TO_KEEP)
        if not has_block(2):
            raise AssertionError(
                "blk00002.dat is missing when should still be there")

        # advance the tip so blk00002.dat and blk00003.dat can be pruned (the
        # last 288 blocks should now be in blk00004.dat)
        node.generate(288)
        prune(1000)
        if has_block(2):
            raise AssertionError(
                "blk00002.dat is still there, should be pruned by now")
        if has_block(3):
            raise AssertionError(
                "blk00003.dat is still there, should be pruned by now")

        # stop node, start back up with auto-prune at 550MB, make sure still
        # runs
        self.stop_node(node_number)
        self.nodes[node_number] = start_node(
            node_number, self.options.tmpdir,
            ["-prune=550", "-blockmaxsize=1000000"], timewait=900)

        self.log.info("Success")

    def wallet_test(self):
        # check that the pruning node's wallet is still in good shape
        self.log.info("Stop and start pruning node to trigger wallet rescan")
        self.stop_node(2)
        start_node(2, self.options.tmpdir, ["-prune=550"])
        self.log.info("Success")

        # check that the wallet loads successfully when restarting a pruned
        # node after IBD. this was reported to fail in #7494.
        self.log.info("Syncing node 5 to test wallet")
        connect_nodes(self.nodes[0], 5)
        nds = [self.nodes[0], self.nodes[5]]
        sync_blocks(nds, wait=5, timeout=300)
        # Stop and start to trigger rescan
        self.stop_node(5)
        start_node(5, self.options.tmpdir, ["-prune=550"])
        self.log.info("Success")

    def run_test(self):
        self.log.info(
            "Warning! This test requires 4GB of disk space and takes over 30 mins (up to 2 hours)")
        self.log.info("Mining a big blockchain of 995 blocks")
# \ \ # ++...+(1031)X.. .. # # Reconnect nodes and mine 220 more blocks on N1 # N1 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ X... # \ \ # ++...+(1031)X.. .. # # N2 **...*(1020) **...**(1032) @@...@@@(1552) # \ \ # \ *...**(1320) # \ \ # ++...++(1044) .. # # N0 ********************(1032) @@...@@@(1552) # \ # *...**(1320) self.log.info( "Test that we can rerequest a block we previously pruned if needed for a reorg") self.reorg_back() # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*) # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to # original main chain (*), but will require redownload of some blocks # In order to have a peer we think we can download from, must also perform this invalidation # on N0 and mine a new longest chain to trigger. # Final result: # N0 ********************(1032) **...****(1553) # \ # X@...@@@(1552) # # N2 **...*(1020) **...**(1032) **...****(1553) # \ \ # \ X@...@@@(1552) # \ # +.. # # N1 doesn't change because 1033 on main chain (*) is invalid self.log.info("Test manual pruning with block indices") self.manual_test(3, use_timestamp=False) self.log.info("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) self.log.info("Test wallet re-scan") self.wallet_test() self.log.info("Done") if __name__ == '__main__': PruneTest().main() diff --git a/qa/rpc-tests/rawtransactions.py b/qa/rpc-tests/rawtransactions.py index 578967974..ab6f68878 100755 --- a/qa/rpc-tests/rawtransactions.py +++ b/qa/rpc-tests/rawtransactions.py @@ -1,228 +1,216 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """rawtranscation RPCs QA test. 
# Tests the following RPCs: # - createrawtransaction # - signrawtransaction # - sendrawtransaction # - decoderawtransaction # - getrawtransaction """ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * # Create one-input, one-output, no-fee transaction: class RawTransactionsTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 3 def setup_network(self, split=False): - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) - - # connect to a local machine for debugging - # url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332) - # proxy = AuthServiceProxy(url) - # proxy.url = url # store URL on proxy for info - # self.nodes.append(proxy) - - connect_nodes_bi(self.nodes, 0, 1) - connect_nodes_bi(self.nodes, 1, 2) + super().setup_network() connect_nodes_bi(self.nodes, 0, 2) - self.is_network_split = False - self.sync_all() - def run_test(self): # prepare some coins for multiple *rawtransaction commands self.nodes[2].generate(1) self.sync_all() self.nodes[0].generate(101) self.sync_all() self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0) self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0) self.sync_all() self.nodes[0].generate(5) self.sync_all() # # sendrawtransaction with missing input # # inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1}] # won't exist outputs = {self.nodes[0].getnewaddress(): 4.998} rawtx = self.nodes[2].createrawtransaction(inputs, outputs) rawtx = self.nodes[2].signrawtransaction( rawtx, None, None, "ALL|FORKID") # This will raise an exception since there are missing inputs assert_raises_jsonrpc( -25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex']) # # RAW TX MULTISIG TESTS # # # 2of2 test addr1 = self.nodes[2].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[2].validateaddress(addr1) addr2Obj = self.nodes[2].validateaddress(addr2) mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) mSigObjValid = self.nodes[2].validateaddress(mSigObj) # use balance deltas instead of absolute values bal = self.nodes[2].getbalance() # send 1.2 BTC to msig addr txId = self.nodes[0].sendtoaddress(mSigObj, 1.2) self.sync_all() self.nodes[0].generate(1) self.sync_all() # node2 has both keys of the 2of2 ms addr., tx should affect the # balance assert_equal(self.nodes[2].getbalance(), bal + Decimal('1.20000000')) # 2of3 test from different nodes bal = self.nodes[2].getbalance() addr1 = self.nodes[1].getnewaddress() addr2 = self.nodes[2].getnewaddress() addr3 = self.nodes[2].getnewaddress() addr1Obj = self.nodes[1].validateaddress(addr1) addr2Obj = self.nodes[2].validateaddress(addr2) addr3Obj = self.nodes[2].validateaddress(addr3) mSigObj = self.nodes[2].addmultisigaddress( 2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']]) mSigObjValid = self.nodes[2].validateaddress(mSigObj) txId = self.nodes[0].sendtoaddress(mSigObj, 2.2) decTx = self.nodes[0].gettransaction(txId) rawTx = self.nodes[0].decoderawtransaction(decTx['hex']) sPK = rawTx['vout'][0]['scriptPubKey']['hex'] self.sync_all() self.nodes[0].generate(1) self.sync_all() # THIS IS AN INCOMPLETE FEATURE # NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND # COUNT IN THE BALANCE CALCULATION # for now, assume the funds of a 2of3 multisig tx are not
marked as # spendable assert_equal(self.nodes[2].getbalance(), bal) txDetails = self.nodes[0].gettransaction(txId, True) rawTx = self.nodes[0].decoderawtransaction(txDetails['hex']) vout = False for outpoint in rawTx['vout']: if outpoint['value'] == Decimal('2.20000000'): vout = outpoint break bal = self.nodes[0].getbalance() inputs = [{ "txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "amount": vout['value'], }] outputs = {self.nodes[0].getnewaddress(): 2.19} rawTx = self.nodes[2].createrawtransaction(inputs, outputs) rawTxPartialSigned = self.nodes[ 1].signrawtransaction(rawTx, inputs, None, "ALL|FORKID") # node1 only has one key, can't completely sign the tx assert_equal(rawTxPartialSigned['complete'], False) rawTxSigned = self.nodes[2].signrawtransaction( rawTx, inputs, None, "ALL|FORKID") # node2 can sign the tx completely, it owns two of three keys assert_equal(rawTxSigned['complete'], True) self.nodes[2].sendrawtransaction(rawTxSigned['hex']) rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex']) self.sync_all() self.nodes[0].generate(1) self.sync_all() assert_equal(self.nodes[0].getbalance(), bal + Decimal( '50.00000000') + Decimal('2.19000000')) # block reward + tx # getrawtransaction tests # 1. valid parameters - only supply txid txHash = rawTx["hash"] assert_equal( self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex']) # 2. valid parameters - supply txid and 0 for non-verbose assert_equal( self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex']) # 3. valid parameters - supply txid and False for non-verbose assert_equal(self.nodes[0].getrawtransaction( txHash, False), rawTxSigned['hex']) # 4. valid parameters - supply txid and 1 for verbose. # We only check the "hex" field of the output so we don't need to # update this test every time the output format changes. assert_equal(self.nodes[0].getrawtransaction( txHash, 1)["hex"], rawTxSigned['hex']) # 5. valid parameters - supply txid and True for verbose assert_equal(self.nodes[0].getrawtransaction( txHash, True)["hex"], rawTxSigned['hex']) # 6. invalid parameters - supply txid and string "False" assert_raises_jsonrpc( -3, "Invalid type", self.nodes[0].getrawtransaction, txHash, "False") # 7. invalid parameters - supply txid and empty array assert_raises_jsonrpc( -3, "Invalid type", self.nodes[0].getrawtransaction, txHash, []) # 8. invalid parameters - supply txid and empty dict assert_raises_jsonrpc( -3, "Invalid type", self.nodes[0].getrawtransaction, txHash, {}) inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1, 'sequence': 1000}] outputs = {self.nodes[0].getnewaddress(): 1} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 1000) # 9. invalid parameters - sequence number out of range inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1, 'sequence': -1}] outputs = {self.nodes[0].getnewaddress(): 1} assert_raises_jsonrpc( -8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) # 10.
invalid parameters - sequence number out of range inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1, 'sequence': 4294967296}] outputs = {self.nodes[0].getnewaddress(): 1} assert_raises_jsonrpc( -8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs) inputs = [ {'txid': "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout': 1, 'sequence': 4294967294}] outputs = {self.nodes[0].getnewaddress(): 1} rawtx = self.nodes[0].createrawtransaction(inputs, outputs) decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['vin'][0]['sequence'], 4294967294) if __name__ == '__main__': RawTransactionsTest().main() diff --git a/qa/rpc-tests/receivedby.py b/qa/rpc-tests/receivedby.py index d38697236..44d23f3f3 100755 --- a/qa/rpc-tests/receivedby.py +++ b/qa/rpc-tests/receivedby.py @@ -1,164 +1,164 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Exercise the listreceivedbyaddress API from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * def get_sub_array_from_array(object_array, to_match): ''' Finds and returns a sub array from an array of arrays. to_match should be a unique identifier of a sub array ''' for item in object_array: all_match = True for key, value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue return item return [] class ReceivedByTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 4 self.setup_clean_chain = False def setup_nodes(self): # This test requires mocktime enable_mocktime() - return start_nodes(self.num_nodes, self.options.tmpdir) + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) def run_test(self): ''' listreceivedbyaddress Test ''' # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # Check not listed in listreceivedbyaddress because it has 0 confirmations assert_array_result(self.nodes[1].listreceivedbyaddress(), {"address": addr}, {}, True) # Bury Tx under 10 blocks so it will be returned by # listreceivedbyaddress self.nodes[1].generate(10) self.sync_all() assert_array_result(self.nodes[1].listreceivedbyaddress(), {"address": addr}, {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) # With minconf < 10 assert_array_result(self.nodes[1].listreceivedbyaddress(5), {"address": addr}, {"address": addr, "account": "", "amount": Decimal("0.1"), "confirmations": 10, "txids": [txid, ]}) # With minconf > 10, should not find Tx assert_array_result( self.nodes[1].listreceivedbyaddress(11), {"address": addr}, {}, True) # Empty Tx addr = self.nodes[1].getnewaddress() assert_array_result(self.nodes[1].listreceivedbyaddress(0, True), {"address": addr}, {"address": addr, "account": "", "amount": 0, "confirmations": 0, "txids": []}) ''' getreceivedbyaddress Test ''' # Send from node 0 to 1 addr = self.nodes[1].getnewaddress() txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # Check balance is 0 because of 0 confirmations balance = self.nodes[1].getreceivedbyaddress(addr) if balance != Decimal("0.0"): raise AssertionError( "Wrong balance returned by getreceivedbyaddress, %0.2f" % (balance)) # Check balance is 0.1 balance =
self.nodes[1].getreceivedbyaddress(addr, 0) if balance != Decimal("0.1"): raise AssertionError( "Wrong balance returned by getreceivedbyaddress, %0.2f" % (balance)) # Bury Tx under 10 blocks so it will be returned by the default # getreceivedbyaddress self.nodes[1].generate(10) self.sync_all() balance = self.nodes[1].getreceivedbyaddress(addr) if balance != Decimal("0.1"): raise AssertionError( "Wrong balance returned by getreceivedbyaddress, %0.2f" % (balance)) ''' listreceivedbyaccount + getreceivedbyaccount Test ''' # set pre-state addrArr = self.nodes[1].getnewaddress() account = self.nodes[1].getaccount(addrArr) received_by_account_json = get_sub_array_from_array( self.nodes[1].listreceivedbyaccount(), {"account": account}) if len(received_by_account_json) == 0: raise AssertionError("No accounts found in node") balance_by_account = self.nodes[1].getreceivedbyaccount(account) txid = self.nodes[0].sendtoaddress(addr, 0.1) self.sync_all() # listreceivedbyaccount should return received_by_account_json because # of 0 confirmations assert_array_result(self.nodes[1].listreceivedbyaccount(), {"account": account}, received_by_account_json) # getreceivedbyaccount should return the same balance because of 0 # confirmations balance = self.nodes[1].getreceivedbyaccount(account) if balance != balance_by_account: raise AssertionError( "Wrong balance returned by getreceivedbyaccount, %0.2f" % (balance)) self.nodes[1].generate(10) self.sync_all() # listreceivedbyaccount should return updated account balance assert_array_result(self.nodes[1].listreceivedbyaccount(), {"account": account}, {"account": received_by_account_json["account"], "amount": (received_by_account_json["amount"] + Decimal("0.1"))}) # getreceivedbyaccount should return the updated balance balance = self.nodes[1].getreceivedbyaccount(account) if balance != balance_by_account + Decimal("0.1"): raise AssertionError( "Wrong balance returned by getreceivedbyaccount, %0.2f" % (balance)) # Create a new account named "mynewaccount" that has a 0 balance self.nodes[1].getaccountaddress("mynewaccount") received_by_account_json = get_sub_array_from_array( self.nodes[1].listreceivedbyaccount(0, True), {"account": "mynewaccount"}) if len(received_by_account_json) == 0: raise AssertionError("No accounts found in node") # Test includeempty of listreceivedbyaccount if received_by_account_json["amount"] != Decimal("0.0"): raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f" % (received_by_account_json["amount"])) # Test getreceivedbyaccount for 0 amount accounts balance = self.nodes[1].getreceivedbyaccount("mynewaccount") if balance != Decimal("0.0"): raise AssertionError( "Wrong balance returned by getreceivedbyaccount, %0.2f" % (balance)) if __name__ == '__main__': ReceivedByTest().main() diff --git a/qa/rpc-tests/reindex.py b/qa/rpc-tests/reindex.py index d973a1548..34a36d585 100755 --- a/qa/rpc-tests/reindex.py +++ b/qa/rpc-tests/reindex.py @@ -1,49 +1,46 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php.
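(Aside on the receivedby tests that end just above: every assertion there reduces to locating one entry in the RPC's JSON array and comparing its fields. A minimal standalone sketch of that matching step, with illustrative data and names that are not part of the patch:)

def find_entry(entries, to_match):
    # Return the first dict containing every (key, value) pair in to_match;
    # this mirrors get_sub_array_from_array() defined in receivedby.py above.
    for entry in entries:
        if all(entry.get(k) == v for k, v in to_match.items()):
            return entry
    return []

# Hypothetical listreceivedbyaddress-style result:
entries = [{"address": "mpLQ...", "amount": 0.1, "confirmations": 10}]
assert find_entry(entries, {"address": "mpLQ..."})["confirmations"] == 10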
# # Test -reindex and -reindex-chainstate with CheckBlockIndex # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( start_nodes, stop_nodes, assert_equal, ) import time class ReindexTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 - def setup_network(self): - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) - def reindex(self, justchainstate=False): self.nodes[0].generate(3) blockcount = self.nodes[0].getblockcount() stop_nodes(self.nodes) extra_args = [[ "-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]] self.nodes = start_nodes( self.num_nodes, self.options.tmpdir, extra_args) while self.nodes[0].getblockcount() < blockcount: time.sleep(0.1) assert_equal(self.nodes[0].getblockcount(), blockcount) self.log.info("Success") def run_test(self): self.reindex(False) self.reindex(True) self.reindex(False) self.reindex(True) if __name__ == '__main__': ReindexTest().main() diff --git a/qa/rpc-tests/rest.py b/qa/rpc-tests/rest.py index 7d7e138b7..97ef48cf5 100755 --- a/qa/rpc-tests/rest.py +++ b/qa/rpc-tests/rest.py @@ -1,396 +1,392 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test REST interface # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from struct import * from io import BytesIO from codecs import encode import http.client import urllib.parse def deser_uint256(f): r = 0 for i in range(8): t = unpack(b" this won't connect. with mininode_lock: test_node.last_getheaders = None test_node.send_header_for_blocks([blocks[1]]) test_node.wait_for_getheaders(timeout=1) test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) [test_node.send_message(msg_block(x)) for x in blocks] test_node.sync_with_ping() assert_equal( int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256) blocks = [] # Now we test that if we repeatedly don't send connecting headers, we # don't go into an infinite loop trying to get them to connect. MAX_UNCONNECTING_HEADERS = 10 for j in range(MAX_UNCONNECTING_HEADERS + 1): blocks.append( create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 for i in range(1, MAX_UNCONNECTING_HEADERS): # Send a header that doesn't connect, check that we get a # getheaders. with mininode_lock: test_node.last_getheaders = None test_node.send_header_for_blocks([blocks[i]]) test_node.wait_for_getheaders(timeout=1) # Next header will connect, should re-set our count: test_node.send_header_for_blocks([blocks[0]]) # Remove the first two entries (blocks[1] would connect): blocks = blocks[2:] # Now try to see how many unconnecting headers we can send # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS for i in range(5 * MAX_UNCONNECTING_HEADERS - 1): # Send a header that doesn't connect, check that we get a # getheaders. with mininode_lock: test_node.last_getheaders = None test_node.send_header_for_blocks([blocks[i % len(blocks)]]) test_node.wait_for_getheaders(timeout=1) # Eventually this stops working. 
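# Background assumption for the 5*MAX_UNCONNECTING_HEADERS figure above (this
# describes the node's expected misbehavior accounting, not anything this
# patch changes): every MAX_UNCONNECTING_HEADERS-th unconnecting headers
# message is expected to cost the peer 20 misbehavior points, and a peer is
# disconnected at 100 points, so 5 * MAX_UNCONNECTING_HEADERS messages should
# be the last ones tolerated before the disconnect exercised below.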
with mininode_lock: test_node.last_getheaders = None test_node.send_header_for_blocks([blocks[-1]]) # Should get disconnected test_node.wait_for_disconnect() with mininode_lock: test_node.last_getheaders = True self.log.info("Part 5: success!") # Finally, check that the inv node never received a getdata request # throughout the test assert_equal(inv_node.last_getdata, None) if __name__ == '__main__': SendHeadersTest().main() diff --git a/qa/rpc-tests/signmessages.py b/qa/rpc-tests/signmessages.py index b3376ea17..0f1f0a3ec 100755 --- a/qa/rpc-tests/signmessages.py +++ b/qa/rpc-tests/signmessages.py @@ -1,42 +1,38 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class SignMessagesTest(BitcoinTestFramework): """Tests RPC commands for signing and verifying messages.""" def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 - def setup_network(self, split=False): - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) - self.is_network_split = False - def run_test(self): message = 'This is just a test message' # Test the signing with a privkey privKey = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N' address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB' signature = self.nodes[0].signmessagewithprivkey(privKey, message) # Verify the message assert(self.nodes[0].verifymessage(address, signature, message)) # Test the signing with an address from the wallet address = self.nodes[0].getnewaddress() signature = self.nodes[0].signmessage(address, message) # Verify the message assert(self.nodes[0].verifymessage(address, signature, message)) if __name__ == '__main__': SignMessagesTest().main() diff --git a/qa/rpc-tests/signrawtransactions.py b/qa/rpc-tests/signrawtransactions.py index 292e6c7d9..9823b6a24 100755 --- a/qa/rpc-tests/signrawtransactions.py +++ b/qa/rpc-tests/signrawtransactions.py @@ -1,154 +1,150 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class SignRawTransactionsTest(BitcoinTestFramework): """Tests transaction signing via RPC command "signrawtransaction".""" def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 - def setup_network(self, split=False): - self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) - self.is_network_split = False - def successful_signing_test(self): """Creates and signs a valid raw transaction with one input.
Expected results: 1) The transaction has a complete set of signatures 2) No script verification error occurred""" privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA'] inputs = [ # Valid pay-to-pubkey scripts {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, 'amount': 3.14159, 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, {'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0, 'amount': '123.456', 'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'}, ] outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys) # 1) The transaction has a complete set of signatures assert 'complete' in rawTxSigned assert_equal(rawTxSigned['complete'], True) # 2) No script verification error occurred assert 'errors' not in rawTxSigned # Check that signrawtransaction doesn't blow up on garbage merge # attempts dummyTxInconsistent = self.nodes[ 0].createrawtransaction([inputs[0]], outputs) rawTxUnsigned = self.nodes[0].signrawtransaction( rawTx + dummyTxInconsistent, inputs) assert 'complete' in rawTxUnsigned assert_equal(rawTxUnsigned['complete'], False) # Check that signrawtransaction properly merges unsigned and signed # txn, even with garbage in the middle rawTxSigned2 = self.nodes[0].signrawtransaction( rawTxUnsigned["hex"] + dummyTxInconsistent + rawTxSigned["hex"], inputs) assert 'complete' in rawTxSigned2 assert_equal(rawTxSigned2['complete'], True) assert 'errors' not in rawTxSigned2 def script_verification_error_test(self): """Creates and signs a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script. 
Expected results: 3) The transaction has no complete set of signatures 4) Two script verification errors occurred 5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error") 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)""" privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'] inputs = [ # Valid pay-to-pubkey script {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, 'amount': 0}, # Invalid script {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7, 'amount': '1.1'}, # Missing scriptPubKey {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1, 'amount': 2.0}, ] scripts = [ # Valid pay-to-pubkey script {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0, 'amount': 0, 'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}, # Invalid script {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7, 'amount': '1.1', 'scriptPubKey': 'badbadbadbad'} ] outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1} rawTx = self.nodes[0].createrawtransaction(inputs, outputs) # Make sure decoderawtransaction is at least marginally sane decodedRawTx = self.nodes[0].decoderawtransaction(rawTx) for i, inp in enumerate(inputs): assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"]) assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"]) # Make sure decoderawtransaction throws if there is extra data assert_raises(JSONRPCException, self.nodes[ 0].decoderawtransaction, rawTx + "00") rawTxSigned = self.nodes[0].signrawtransaction( rawTx, scripts, privKeys) # 3) The transaction has no complete set of signatures assert 'complete' in rawTxSigned assert_equal(rawTxSigned['complete'], False) # 4) Two script verification errors occurred assert 'errors' in rawTxSigned assert_equal(len(rawTxSigned['errors']), 2) # 5) Script verification errors have certain properties assert 'txid' in rawTxSigned['errors'][0] assert 'vout' in rawTxSigned['errors'][0] assert 'scriptSig' in rawTxSigned['errors'][0] assert 'sequence' in rawTxSigned['errors'][0] assert 'error' in rawTxSigned['errors'][0] # 6) The verification errors refer to the invalid (vin 1) and missing # input (vin 2) assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid']) assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout']) assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid']) assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout']) def run_test(self): self.successful_signing_test() self.script_verification_error_test() if __name__ == '__main__': SignRawTransactionsTest().main() diff --git a/qa/rpc-tests/smartfees.py b/qa/rpc-tests/smartfees.py index e2e2d6de1..71390cbcd 100755 --- a/qa/rpc-tests/smartfees.py +++ b/qa/rpc-tests/smartfees.py @@ -1,298 +1,297 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test fee estimation code # from collections import OrderedDict from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.outputchecker import OutputChecker # Construct 2 trivial P2SH's and the ScriptSigs that spend them # So we can create many many transactions without needing to spend # time signing. 
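As an aside, the two addresses defined next commit to two-byte redeem scripts via HASH160. A quick standalone check of that relationship (illustrative, not part of the patch; assumes the local hashlib/OpenSSL build exposes ripemd160):

import hashlib

def hash160(data: bytes) -> bytes:
    # P2SH addresses encode RIPEMD160(SHA256(redeem_script))
    return hashlib.new("ripemd160", hashlib.sha256(data).digest()).digest()

print(hash160(bytes.fromhex("5175")).hex())  # "OP_1 OP_DROP" -> hash behind P2SH_1
print(hash160(bytes.fromhex("5275")).hex())  # "OP_2 OP_DROP" -> hash behind P2SH_2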
P2SH_1 = "2MySexEGVzZpRgNQ1JdjdP5bRETznm3roQ2" # P2SH of "OP_1 OP_DROP" P2SH_2 = "2NBdpwq8Aoo1EEKEXPNrKvr5xQr3M9UfcZA" # P2SH of "OP_2 OP_DROP" # Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2 # 4 bytes of OP_TRUE and push 2-byte redeem script of "OP_1 OP_DROP" or # "OP_2 OP_DROP" SCRIPT_SIG = ["0451025175", "0451025275"] global log def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment): ''' Create and send a transaction with a random fee. The transaction pays to a trivial P2SH script, and assumes that its inputs are of the same form. The function takes a list of confirmed outputs and unconfirmed outputs and attempts to use the confirmed list first for its inputs. It adds the newly created outputs to the unconfirmed list. Returns (raw transaction, fee) ''' # It's best to exponentially distribute our random fees # because the buckets are exponentially spaced. # Exponentially distributed from 1-128 * fee_increment rand_fee = float(fee_increment) * (1.1892**random.randint(0, 28)) # Total fee ranges from min_fee to min_fee + 127*fee_increment fee = min_fee - fee_increment + satoshi_round(rand_fee) inputs = [] total_in = Decimal("0.00000000") while total_in <= (amount + fee) and len(conflist) > 0: t = conflist.pop(0) total_in += t["amount"] inputs.append({"txid": t["txid"], "vout": t["vout"]}) if total_in <= amount + fee: while total_in <= (amount + fee) and len(unconflist) > 0: t = unconflist.pop(0) total_in += t["amount"] inputs.append({"txid": t["txid"], "vout": t["vout"]}) if total_in <= amount + fee: raise RuntimeError( "Insufficient funds: need %d, have %d" % (amount + fee, total_in)) outputs = {} outputs = OrderedDict([(P2SH_1, total_in - amount - fee), (P2SH_2, amount)]) rawtx = from_node.createrawtransaction(inputs, outputs) # createrawtransaction constructs a transaction that is ready to be signed. # These transactions don't need to be signed, but we still have to insert the ScriptSig # that will satisfy the ScriptPubKey. completetx = rawtx[0:10] inputnum = 0 for inp in inputs: completetx += rawtx[10 + 82 * inputnum:82 + 82 * inputnum] completetx += SCRIPT_SIG[inp["vout"]] completetx += rawtx[84 + 82 * inputnum:92 + 82 * inputnum] inputnum += 1 completetx += rawtx[10 + 82 * inputnum:] txid = from_node.sendrawtransaction(completetx, True) unconflist.append( {"txid": txid, "vout": 0, "amount": total_in - amount - fee}) unconflist.append({"txid": txid, "vout": 1, "amount": amount}) return (completetx, fee) def split_inputs(from_node, txins, txouts, initial_split=False): ''' We need to generate a lot of very small inputs so we can generate a ton of transactions and they will have low priority. This function takes an input from txins, and creates and sends a transaction which splits the value into 2 outputs which are appended to txouts. 
''' prevtxout = txins.pop() inputs = [] inputs.append({"txid": prevtxout["txid"], "vout": prevtxout["vout"]}) half_change = satoshi_round(prevtxout["amount"] / 2) rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000") outputs = OrderedDict([(P2SH_1, half_change), (P2SH_2, rem_change)]) rawtx = from_node.createrawtransaction(inputs, outputs) # If this is the initial split we actually need to sign the transaction # Otherwise we just need to insert the proper ScriptSig if (initial_split): completetx = from_node.signrawtransaction( rawtx, None, None, "ALL|FORKID")["hex"] else: completetx = rawtx[0:82] + SCRIPT_SIG[prevtxout["vout"]] + rawtx[84:] txid = from_node.sendrawtransaction(completetx, True) txouts.append({"txid": txid, "vout": 0, "amount": half_change}) txouts.append({"txid": txid, "vout": 1, "amount": rem_change}) def check_estimates(node, fees_seen, max_invalid, print_estimates=True): ''' This function calls estimatefee and verifies that the estimates meet certain invariants. ''' all_estimates = [node.estimatefee(i) for i in range(1, 26)] if print_estimates: log.info([str(all_estimates[e - 1]) for e in [1, 2, 3, 6, 15, 25]]) delta = 1.0e-6 # account for rounding error last_e = max(fees_seen) for e in [x for x in all_estimates if x >= 0]: # Estimates should be within the bounds of what transaction fees # actually were: if float(e) + delta < min(fees_seen) or float(e) - delta > max(fees_seen): raise AssertionError("Estimated fee (%f) out of range (%f,%f)" % (float(e), min(fees_seen), max(fees_seen))) # Estimates should be monotonically decreasing if float(e) - delta > last_e: raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms" % (float(e), float(last_e))) last_e = e valid_estimate = False invalid_estimates = 0 for i, e in enumerate(all_estimates): # estimate is for i+1 if e >= 0: valid_estimate = True # estimatesmartfee should return the same result assert_equal(node.estimatesmartfee(i + 1)["feerate"], e) else: invalid_estimates += 1 # estimatesmartfee should still be valid approx_estimate = node.estimatesmartfee(i + 1)["feerate"] answer_found = node.estimatesmartfee(i + 1)["blocks"] assert(approx_estimate > 0) assert(answer_found > i + 1) # Once we're at a high enough confirmation count that we can give an estimate # We should have estimates for all higher confirmation counts if valid_estimate: raise AssertionError( "Invalid estimate appears at higher confirm count than valid estimate") # Check on the expected number of different confirmation counts # that we might not have valid estimates for if invalid_estimates > max_invalid: raise AssertionError( "More than (%d) invalid estimates" % (max_invalid)) return all_estimates class EstimateFeeTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 3 self.setup_clean_chain = False def setup_network(self): ''' We'll set up the network to have 3 nodes that all mine with different parameters. But first we need to use one node to create a lot of small low priority outputs which we will use to generate our transactions.
''' self.nodes = [] # Use node0 to mine blocks for input splitting self.nodes.append( start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-whitelist=127.0.0.1"])) self.log.info("This test is time consuming, please be patient") self.log.info( "Splitting inputs to small size so we can generate low priority tx's") self.txouts = [] self.txouts2 = [] # Split a coinbase into two transaction puzzle outputs split_inputs(self.nodes[0], self.nodes[ 0].listunspent(0), self.txouts, True) # Mine while (len(self.nodes[0].getrawmempool()) > 0): self.nodes[0].generate(1) # Repeatedly split those 2 outputs, doubling twice for each rep # Use txouts to monitor the available utxo, since these won't be # tracked in wallet reps = 0 while (reps < 5): # Double txouts to txouts2 while (len(self.txouts) > 0): split_inputs(self.nodes[0], self.txouts, self.txouts2) while (len(self.nodes[0].getrawmempool()) > 0): self.nodes[0].generate(1) # Double txouts2 to txouts while (len(self.txouts2) > 0): split_inputs(self.nodes[0], self.txouts2, self.txouts) while (len(self.nodes[0].getrawmempool()) > 0): self.nodes[0].generate(1) reps += 1 self.log.info("Finished splitting") # Now we can connect the other nodes, didn't want to connect them earlier # so the estimates would not be affected by the splitting transactions # Node1 mines small blocks but that are bigger than the expected transaction rate, # and allows free transactions. # NOTE: the CreateNewBlock code starts counting block size at 1,000 bytes, # (17k is room enough for 110 or so transactions) self.nodes.append(start_node(1, self.options.tmpdir, ["-blockprioritysize=1500", "-blockmaxsize=17000", "-maxorphantx=1000"], stderr_checker=OutputChecker())) connect_nodes(self.nodes[1], 0) # Node2 is a stingy miner, that # produces too small blocks (room for only 55 or so transactions) node2args = ["-blockprioritysize=0", "-blockmaxsize=8000", "-maxorphantx=1000"] self.nodes.append( start_node(2, self.options.tmpdir, node2args, stderr_checker=OutputChecker())) connect_nodes(self.nodes[0], 2) connect_nodes(self.nodes[2], 1) - self.is_network_split = False self.sync_all() def transact_and_mine(self, numblocks, mining_node): min_fee = Decimal("0.00001") # We will now mine numblocks blocks generating on average 100 transactions between each block # We shuffle our confirmed txout set before each set of transactions # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible # resorting to tx's that depend on the mempool when those run out for i in range(numblocks): random.shuffle(self.confutxo) for j in range(random.randrange(100 - 50, 100 + 50)): from_index = random.randint(1, 2) (txhex, fee) = small_txpuzzle_randfee(self.nodes[ from_index], self.confutxo, self.memutxo, Decimal("0.005"), min_fee, min_fee) tx_kbytes = (len(txhex) // 2) / 1000.0 self.fees_per_kb.append(float(fee) / tx_kbytes) sync_mempools(self.nodes[0:3], wait=.1) mined = mining_node.getblock( mining_node.generate(1)[0], True)["tx"] sync_blocks(self.nodes[0:3], wait=.1) # update which txouts are confirmed newmem = [] for utx in self.memutxo: if utx["txid"] in mined: self.confutxo.append(utx) else: newmem.append(utx) self.memutxo = newmem def run_test(self): # Make log handler available to helper functions global log log = self.log self.fees_per_kb = [] self.memutxo = [] self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting self.log.info("Will output estimates for 1/2/3/6/15/25 blocks") for i in range(2): self.log.info( "Creating 
transactions and mining them with a block size that can't keep up") # Create transactions and mine 10 small blocks with node 2, but # create txs faster than we can mine self.transact_and_mine(10, self.nodes[2]) check_estimates(self.nodes[1], self.fees_per_kb, 14) self.log.info( "Creating transactions and mining them at a block size that is just big enough") # Generate transactions while mining 10 more blocks, this time with node1 # which mines blocks with capacity just above the rate that # transactions are being created self.transact_and_mine(10, self.nodes[1]) check_estimates(self.nodes[1], self.fees_per_kb, 2) # Finish by mining a normal-sized block: while len(self.nodes[1].getrawmempool()) > 0: self.nodes[1].generate(1) sync_blocks(self.nodes[0:3], wait=.1) self.log.info("Final estimates after emptying mempools") check_estimates(self.nodes[1], self.fees_per_kb, 2) if __name__ == '__main__': EstimateFeeTest().main() diff --git a/qa/rpc-tests/test_framework/test_framework.py b/qa/rpc-tests/test_framework/test_framework.py index aad27728f..7a1736636 100755 --- a/qa/rpc-tests/test_framework/test_framework.py +++ b/qa/rpc-tests/test_framework/test_framework.py @@ -1,272 +1,267 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Base class for RPC testing import logging import optparse import os import sys import shutil import tempfile import traceback from .util import ( initialize_chain, start_nodes, connect_nodes_bi, + disconnect_nodes, sync_blocks, sync_mempools, stop_nodes, stop_node, enable_coverage, check_json_precision, initialize_chain_clean, PortSeed, ) from .authproxy import JSONRPCException from .outputchecker import OutputChecker class BitcoinTestFramework(object): def __init__(self): self.num_nodes = 4 self.setup_clean_chain = False self.nodes = None def run_test(self): raise NotImplementedError def add_options(self, parser): pass def setup_chain(self): self.log.info("Initializing test directory " + self.options.tmpdir) if self.setup_clean_chain: initialize_chain_clean(self.options.tmpdir, self.num_nodes) else: initialize_chain( self.options.tmpdir, self.num_nodes, self.options.cachedir) def stop_node(self, num_node): stop_node(self.nodes[num_node], num_node) def setup_nodes(self): ''' Starts up the nodes. ''' - return start_nodes(self.num_nodes, self.options.tmpdir) + extra_args = None + if hasattr(self, "extra_args"): + extra_args = self.extra_args + self.nodes = start_nodes( + self.num_nodes, self.options.tmpdir, extra_args) - def setup_network(self, split=False): + def setup_network(self): ''' Sets up network including starting up nodes. ''' - self.nodes = self.setup_nodes() + self.setup_nodes() # Connect the nodes as a "chain". This allows us # to split the network between nodes 1 and 2 to get # two halves that can work on competing chains. - # If we joined network halves, connect the nodes from the joint - # on outward. This ensures that chains are properly reorganised. - if not split: - connect_nodes_bi(self.nodes, 1, 2) - sync_blocks(self.nodes[1:3]) - sync_mempools(self.nodes[1:3]) - - connect_nodes_bi(self.nodes, 0, 1) - connect_nodes_bi(self.nodes, 2, 3) - self.is_network_split = split + for i in range(self.num_nodes - 1): + connect_nodes_bi(self.nodes, i, i + 1) self.sync_all() def split_network(self): """ Split the network of four nodes into nodes 0/1 and 2/3. 
""" - assert not self.is_network_split - stop_nodes(self.nodes) - self.setup_network(True) - - def sync_all(self): - if self.is_network_split: - sync_blocks(self.nodes[:2]) - sync_blocks(self.nodes[2:]) - sync_mempools(self.nodes[:2]) - sync_mempools(self.nodes[2:]) - else: - sync_blocks(self.nodes) - sync_mempools(self.nodes) + disconnect_nodes(self.nodes[1], 2) + disconnect_nodes(self.nodes[2], 1) + self.sync_all([self.nodes[:2], self.nodes[2:]]) + + def sync_all(self, node_groups=None): + if not node_groups: + node_groups = [self.nodes] + + [sync_blocks(group) for group in node_groups] + [sync_mempools(group) for group in node_groups] def join_network(self): """ Join the (previously split) network halves together. """ - assert self.is_network_split - stop_nodes(self.nodes) - self.setup_network(False) + connect_nodes_bi(self.nodes, 1, 2) + self.sync_all() def main(self): parser = optparse.OptionParser(usage="%prog [options]") parser.add_option( "--nocleanup", dest="nocleanup", default=False, action="store_true", help="Leave bitcoinds and test.* datadir on exit or error") parser.add_option( "--noshutdown", dest="noshutdown", default=False, action="store_true", help="Don't stop bitcoinds after the test execution") parser.add_option( "--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../../src"), help="Source directory containing bitcoind/bitcoin-cli (default: %default)") parser.add_option( "--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), help="Directory for caching pregenerated datadirs") parser.add_option( "--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"), help="Root directory for datadirs") parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO", help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. 
Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.") parser.add_option( "--tracerpc", dest="trace_rpc", default=False, action="store_true", help="Print out all RPC calls as they are made") parser.add_option( "--portseed", dest="port_seed", default=os.getpid(), type='int', help="The seed to use for assigning port numbers (default: current process id)") parser.add_option("--coveragedir", dest="coveragedir", help="Write tested RPC commands into this directory") self.add_options(parser) (self.options, self.args) = parser.parse_args() # backup dir variable for removal at cleanup self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + \ '/' + str(self.options.port_seed) if self.options.coveragedir: enable_coverage(self.options.coveragedir) PortSeed.n = self.options.port_seed os.environ['PATH'] = self.options.srcdir + ":" + \ self.options.srcdir + "/qt:" + os.environ['PATH'] check_json_precision() # Set up temp directory and start logging os.makedirs(self.options.tmpdir, exist_ok=False) self._start_logging() success = False try: self.setup_chain() self.setup_network() self.run_test() success = True except JSONRPCException as e: self.log.exception("JSONRPC error") except AssertionError as e: self.log.exception("Assertion failed") except KeyError as e: self.log.exception("Key error") except Exception as e: self.log.exception("Unexpected exception caught during testing") except KeyboardInterrupt as e: self.log.warning("Exiting after keyboard interrupt") if not self.options.noshutdown: self.log.info("Stopping nodes") stop_nodes(self.nodes) else: self.log.info( "Note: bitcoinds were not stopped and may still be running") if not self.options.nocleanup and not self.options.noshutdown and success: self.log.info("Cleaning up") shutil.rmtree(self.options.tmpdir) if not os.listdir(self.options.root): os.rmdir(self.options.root) else: self.log.warning("Not cleaning up dir %s" % self.options.tmpdir) if os.getenv("PYTHON_DEBUG", ""): # Dump the end of the debug logs, to aid in debugging rare # travis failures. import glob filenames = glob.glob( self.options.tmpdir + "/node*/regtest/debug.log") MAX_LINES_TO_PRINT = 1000 for f in filenames: print("From", f, ":") from collections import deque print("".join(deque(open(f), MAX_LINES_TO_PRINT))) if success: self.log.info("Tests successful") sys.exit(0) else: self.log.error( "Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir) logging.shutdown() sys.exit(1) def _start_logging(self): # Add logger and logging handlers self.log = logging.getLogger('TestFramework') self.log.setLevel(logging.DEBUG) # Create file handler to log all messages fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log') fh.setLevel(logging.DEBUG) # Create console handler to log messages to stderr. By default this # logs only error messages, but can be configured with --loglevel. ch = logging.StreamHandler(sys.stdout) # User can provide log level as a number or string (eg DEBUG). 
loglevel # was caught as a string, so try to convert it to an int ll = int( self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper() ch.setLevel(ll) # Format logs the same as bitcoind's debug.log with microprecision (so # log files can be concatenated and sorted) formatter = logging.Formatter( fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S') fh.setFormatter(formatter) ch.setFormatter(formatter) # add the handlers to the logger self.log.addHandler(fh) self.log.addHandler(ch) if self.options.trace_rpc: rpc_logger = logging.getLogger("BitcoinRPC") rpc_logger.setLevel(logging.DEBUG) rpc_handler = logging.StreamHandler(sys.stdout) rpc_handler.setLevel(logging.DEBUG) rpc_logger.addHandler(rpc_handler) # Test framework for doing p2p comparison testing, which sets up some bitcoind # binaries: # 1 binary: test binary # 2 binaries: 1 test binary, 1 ref binary # n>2 binaries: 1 test binary, n-1 ref binaries class ComparisonTestFramework(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = True def add_options(self, parser): parser.add_option("--testbinary", dest="testbinary", default=os.getenv("BITCOIND", "bitcoind"), help="bitcoind binary to test") parser.add_option("--refbinary", dest="refbinary", default=os.getenv("BITCOIND", "bitcoind"), help="bitcoind binary to use for reference nodes (if any)") def setup_network(self): + extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes + if hasattr(self, "extra_args"): + extra_args = self.extra_args self.nodes = start_nodes( - self.num_nodes, self.options.tmpdir, - extra_args=[['-whitelist=127.0.0.1']] * self.num_nodes, + self.num_nodes, self.options.tmpdir, extra_args, binary=[self.options.testbinary] + [self.options.refbinary] * (self.num_nodes - 1)) diff --git a/qa/rpc-tests/test_framework/util.py b/qa/rpc-tests/test_framework/util.py index 2711c7316..7d9820935 100644 --- a/qa/rpc-tests/test_framework/util.py +++ b/qa/rpc-tests/test_framework/util.py @@ -1,878 +1,893 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Helpful routines for regression testing # import os import sys from binascii import hexlify, unhexlify from base64 import b64encode from decimal import Decimal, ROUND_DOWN import json import http.client import random import shutil import subprocess import time import re import errno import logging from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException from .outputchecker import OutputChecker DEFAULT_BITCOIND = 'bitcoind' COVERAGE_DIR = None logger = logging.getLogger("TestFramework.utils") # The maximum number of nodes a single test can spawn MAX_NODES = 8 # Don't assign rpc or p2p ports lower than this PORT_MIN = 11000 # The number of ports to "reserve" for p2p and rpc, each PORT_RANGE = 5000 BITCOIND_PROC_WAIT_TIMEOUT = 60 class PortSeed: # Must be initialized with a unique integer for each process n = None # Set Mocktime default to OFF. # MOCKTIME is only needed for scripts that use the # cached version of the blockchain. If the cached # version of the blockchain is used without MOCKTIME # then the mempools will not sync due to IBD. 
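# Illustrative usage implied by the note above (hypothetical snippet, not
# part of the patch): a test that consumes the cached chain pins the node
# clocks before generating, e.g.
#
#     enable_mocktime()
#     set_node_times(self.nodes, get_mocktime())
#     self.nodes[0].generate(1)
#
# Without the pinned clock the cached blocks look hours old, the nodes treat
# themselves as in IBD, and mempool sync stalls.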
MOCKTIME = 0 def enable_mocktime(): # For backward compatibility of the python scripts # with previous versions of the cache, set MOCKTIME # to Jan 1, 2014 + (201 * 10 * 60) global MOCKTIME MOCKTIME = 1388534400 + (201 * 10 * 60) def disable_mocktime(): global MOCKTIME MOCKTIME = 0 def get_mocktime(): return MOCKTIME def enable_coverage(dirname): """Maintain a log of which RPC calls are made during testing.""" global COVERAGE_DIR COVERAGE_DIR = dirname def get_rpc_proxy(url, node_number, timeout=None): """ Args: url (str): URL of the RPC server to call node_number (int): the node number (or id) that this proxy calls to Kwargs: timeout (int): HTTP timeout in seconds Returns: AuthServiceProxy: a convenience object for making RPC calls. """ proxy_kwargs = {} if timeout is not None: proxy_kwargs['timeout'] = timeout proxy = AuthServiceProxy(url, **proxy_kwargs) proxy.url = url # store URL on proxy for info coverage_logfile = coverage.get_filename( COVERAGE_DIR, node_number) if COVERAGE_DIR else None return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) def p2p_port(n): assert(n <= MAX_NODES) return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def rpc_port(n): return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def count_bytes(hex_string): return len(bytearray.fromhex(hex_string)) def bytes_to_hex_str(byte_str): return hexlify(byte_str).decode('ascii') def hex_str_to_bytes(hex_str): return unhexlify(hex_str.encode('ascii')) def str_to_b64str(string): return b64encode(string.encode('utf-8')).decode('ascii') def sync_blocks(rpc_connections, *, wait=1, timeout=60): """ Wait until everybody has the same tip. sync_blocks needs to be called with an rpc_connections set that has at least one node already synced to the latest, stable tip, otherwise there's a chance it might return before all nodes are stably synced. """ # Use getblockcount() instead of waitforblockheight() to determine the # initial max height because the two RPCs look at different internal global # variables (chainActive vs latestBlock) and the former gets updated # earlier.
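# Worked example with illustrative numbers (not from the patch): if the tips
# report heights [200, 198, 200], maxheight below is 200 and each
# waitforblockheight(200, wait * 1000) call blocks a lagging node until it
# reaches height 200 or the per-call wait expires; the loop then re-checks
# that all heights and hashes agree before returning.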
maxheight = max(x.getblockcount() for x in rpc_connections) start_time = cur_time = time.time() while cur_time <= start_time + timeout: tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections] if all(t["height"] == maxheight for t in tips): if all(t["hash"] == tips[0]["hash"] for t in tips): return raise AssertionError("Block sync failed, mismatched block hashes:{}".format( "".join("\n {!r}".format(tip) for tip in tips))) cur_time = time.time() raise AssertionError("Block sync to height {} timed out:{}".format( maxheight, "".join("\n {!r}".format(tip) for tip in tips))) def sync_chain(rpc_connections, *, wait=1, timeout=60): """ Wait until everybody has the same best block """ while timeout > 0: best_hash = [x.getbestblockhash() for x in rpc_connections] if best_hash == [best_hash[0]] * len(best_hash): return time.sleep(wait) timeout -= wait raise AssertionError("Chain sync failed: Best block hashes don't match") def sync_mempools(rpc_connections, *, wait=1, timeout=60): """ Wait until everybody has the same transactions in their memory pools """ while timeout > 0: pool = set(rpc_connections[0].getrawmempool()) num_match = 1 for i in range(1, len(rpc_connections)): if set(rpc_connections[i].getrawmempool()) == pool: num_match = num_match + 1 if num_match == len(rpc_connections): return time.sleep(wait) timeout -= wait raise AssertionError("Mempool sync failed") bitcoind_processes = {} def initialize_datadir(dirname, n): datadir = os.path.join(dirname, "node" + str(n)) if not os.path.isdir(datadir): os.makedirs(datadir) rpc_u, rpc_p = rpc_auth_pair(n) with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f: f.write("regtest=1\n") f.write("rpcuser=" + rpc_u + "\n") f.write("rpcpassword=" + rpc_p + "\n") f.write("port=" + str(p2p_port(n)) + "\n") f.write("rpcport=" + str(rpc_port(n)) + "\n") f.write("listenonion=0\n") return datadir def rpc_auth_pair(n): return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n) def rpc_url(i, rpchost=None): rpc_u, rpc_p = rpc_auth_pair(i) host = '127.0.0.1' port = rpc_port(i) if rpchost: parts = rpchost.split(':') if len(parts) == 2: host, port = parts else: host = rpchost return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port)) def wait_for_bitcoind_start(process, url, i): ''' Wait for bitcoind to start. This means that RPC is accessible and fully initialized. Raise an exception if bitcoind exits during initialization. ''' while True: if process.poll() is not None: raise Exception( 'bitcoind exited with status %i during initialization' % process.returncode) try: rpc = get_rpc_proxy(url, i) blocks = rpc.getblockcount() break # break out of loop on success except IOError as e: if e.errno != errno.ECONNREFUSED: # Port not yet open? raise # unknown IO error except JSONRPCException as e: # Initialization phase if e.error['code'] != -28: # RPC in warmup? raise # unknown JSON RPC exception time.sleep(0.25) def initialize_chain(test_dir, num_nodes, cachedir): """ Create a cache of a 200-block-long chain (with wallet) for MAX_NODES Afterward, create num_nodes copies from the cache. 
""" assert num_nodes <= MAX_NODES create_cache = False for i in range(MAX_NODES): if not os.path.isdir(os.path.join(cachedir, 'node' + str(i))): create_cache = True break if create_cache: logger.debug("Creating data directories from cached datadir") # Find and delete old cache directories if any exist for i in range(MAX_NODES): if os.path.isdir(os.path.join(cachedir, "node" + str(i))): shutil.rmtree(os.path.join(cachedir, "node" + str(i))) # Create cache directories, run bitcoinds: for i in range(MAX_NODES): datadir = initialize_datadir(cachedir, i) args = [os.getenv("BITCOIND", "bitcoind"), "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"] if i > 0: args.append("-connect=127.0.0.1:" + str(p2p_port(0))) bitcoind_processes[i] = subprocess.Popen(args) logger.debug("Creating data directories from cached datadir") wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i) logger.debug("initialize_chain: RPC successfully started") rpcs = [] for i in range(MAX_NODES): try: rpcs.append(get_rpc_proxy(rpc_url(i), i)) except: sys.stderr.write("Error connecting to " + url + "\n") sys.exit(1) # Create a 200-block-long chain; each of the 4 first nodes # gets 25 mature blocks and 25 immature. # Note: To preserve compatibility with older versions of # initialize_chain, only 4 nodes will generate coins. # # blocks are created with timestamps 10 minutes apart # starting from 2010 minutes in the past enable_mocktime() block_time = get_mocktime() - (201 * 10 * 60) for i in range(2): for peer in range(4): for j in range(25): set_node_times(rpcs, block_time) rpcs[peer].generate(1) block_time += 10 * 60 # Must sync before next peer starts generating blocks sync_blocks(rpcs) # Shut them down, and clean up cache directories: stop_nodes(rpcs) disable_mocktime() for i in range(MAX_NODES): os.remove(log_filename(cachedir, i, "debug.log")) os.remove(log_filename(cachedir, i, "db.log")) os.remove(log_filename(cachedir, i, "peers.dat")) os.remove(log_filename(cachedir, i, "fee_estimates.dat")) for i in range(num_nodes): from_dir = os.path.join(cachedir, "node" + str(i)) to_dir = os.path.join(test_dir, "node" + str(i)) shutil.copytree(from_dir, to_dir) initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf def initialize_chain_clean(test_dir, num_nodes): """ Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization. """ for i in range(num_nodes): datadir = initialize_datadir(test_dir, i) def _rpchost_to_args(rpchost): '''Convert optional IP:port spec to rpcconnect/rpcport args''' if rpchost is None: return [] match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost) if not match: raise ValueError('Invalid RPC host spec ' + rpchost) rpcconnect = match.group(1) rpcport = match.group(2) if rpcconnect.startswith('['): # remove IPv6 [...] wrapping rpcconnect = rpcconnect[1:-1] rv = ['-rpcconnect=' + rpcconnect] if rpcport: rv += ['-rpcport=' + rpcport] return rv def locate_bitcoind_binary(): """ Find bitcoind binary if possible. """ bitcoind_binary = os.getenv("BITCOIND", DEFAULT_BITCOIND) if os.path.exists(bitcoind_binary): return bitcoind_binary if os.path.exists(os.path.join('src', DEFAULT_BITCOIND)): bitcoind_binary = os.path.abspath( os.path.join('src', DEFAULT_BITCOIND)) elif bitcoind_binary == 'bitcoind' or not os.path.exists(bitcoind_binary): # If BITCOIND was specified and exists, use it, otherwise look for source. 
# get_srcdir() already returns an absolute path src_dir_cand = get_srcdir(sys.argv[0]) if src_dir_cand and os.path.exists( os.path.join(src_dir_cand, 'src', DEFAULT_BITCOIND)): bitcoind_binary = os.path.join( src_dir_cand, 'src', DEFAULT_BITCOIND) else: sys.stderr.write("Unable to locate bitcoind for this test.\n") sys.exit(1) return bitcoind_binary def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr_checker=None): """ Start a bitcoind and return RPC connection to it. If stderr_checker is provided, it must be an OutputChecker. Its output_file_obj will be connected to the stderr of the bitcoind process. """ datadir = os.path.join(dirname, "node" + str(i)) if binary is None: binary = locate_bitcoind_binary() args = [binary, "-datadir=" + datadir, "-server", "-keypool=1", - "-discover=0", "-rest", "-logtimemicros", "-debug", "-mocktime=" + str(get_mocktime())] + "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(get_mocktime()), "-uacomment=testnode%d" % i] if extra_args is not None: args.extend(extra_args) if stderr_checker: assert(isinstance(stderr_checker, OutputChecker)) bitcoind_processes[i] = subprocess.Popen(args, universal_newlines=True, stderr=stderr_checker.get_connector()) else: bitcoind_processes[i] = subprocess.Popen(args) logger.debug( "initialize_chain: bitcoind started, waiting for RPC to come up") url = rpc_url(i, rpchost) wait_for_bitcoind_start(bitcoind_processes[i], url, i) logger.debug("initialize_chain: RPC successfully started") proxy = get_rpc_proxy(url, i, timeout=timewait) if COVERAGE_DIR: coverage.write_all_rpc_commands(COVERAGE_DIR, proxy) return proxy def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr_checkers=None): """ Start multiple bitcoinds, return RPC connections to them stderr_checkers is a list which can contain OutputCheckers or None for each of the nodes. if a test calls start_nodes and provides an OutputChecker for a node, this will be connected to the stderr output of the node. 
""" if extra_args is None: extra_args = [None for _ in range(num_nodes)] + assert_equal(len(extra_args), num_nodes) if stderr_checkers is None: stderr_checkers = [None for _ in range(num_nodes)] + assert_equal(len(stderr_checkers), num_nodes) if binary is None: binary = [None for _ in range(num_nodes)] + assert_equal(len(binary), num_nodes) rpcs = [] try: for i in range(num_nodes): rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr_checker=stderr_checkers[i])) except: # If one node failed to start, stop the others stop_nodes(rpcs) raise return rpcs def log_filename(dirname, n_node, logname): return os.path.join(dirname, "node" + str(n_node), "regtest", logname) def stop_node(node, i): logger.debug("Stopping node %d" % i) try: node.stop() except http.client.CannotSendRequest as e: logger.exception("Unable to stop node") return_code = bitcoind_processes[i].wait( timeout=BITCOIND_PROC_WAIT_TIMEOUT) assert_equal(return_code, 0) del bitcoind_processes[i] def stop_nodes(nodes): for i, node in enumerate(nodes): stop_node(node, i) assert not bitcoind_processes.values() # All connections must be gone now def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) +def disconnect_nodes(from_connection, node_num): + for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]: + from_connection.disconnectnode(nodeid=peer_id) + + for _ in range(50): + if [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == []: + break + time.sleep(0.1) + else: + raise AssertionError("timed out waiting for disconnect") + + def connect_nodes(from_connection, node_num): ip_port = "127.0.0.1:" + str(p2p_port(node_num)) from_connection.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()): time.sleep(0.1) def connect_nodes_bi(nodes, a, b): connect_nodes(nodes[a], b) connect_nodes(nodes[b], a) def find_output(node, txid, amount): """ Return index to output of txid with value amount Raises exception if there is none. 
""" txdata = node.getrawtransaction(txid, 1) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError( "find_output txid %s : %s not found" % (txid, str(amount))) def gather_inputs(from_node, amount_needed, confirmations_required=1): """ Return a random set of unspent txouts that are enough to pay amount_needed """ assert(confirmations_required >= 0) utxo = from_node.listunspent(confirmations_required) random.shuffle(utxo) inputs = [] total_in = Decimal("0.00000000") while total_in < amount_needed and len(utxo) > 0: t = utxo.pop() total_in += t["amount"] inputs.append( {"txid": t["txid"], "vout": t["vout"], "address": t["address"]}) if total_in < amount_needed: raise RuntimeError( "Insufficient funds: need %d, have %d" % (amount_needed, total_in)) return (total_in, inputs) def make_change(from_node, amount_in, amount_out, fee): """ Create change output(s), return them """ outputs = {} amount = amount_out + fee change = amount_in - amount if change > amount * 2: # Create an extra change output to break up big inputs change_address = from_node.getnewaddress() # Split change in two, being careful of rounding: outputs[change_address] = Decimal(change / 2).quantize( Decimal('0.00000001'), rounding=ROUND_DOWN) change = amount_in - amount - outputs[change_address] if change > 0: outputs[from_node.getnewaddress()] = change return outputs def send_zeropri_transaction(from_node, to_node, amount, fee): """ Create&broadcast a zero-priority transaction. Returns (txid, hex-encoded-txdata) Ensures transaction is zero-priority by first creating a send-to-self, then using its output """ # Create a send-to-self with confirmed inputs: self_address = from_node.getnewaddress() (total_in, inputs) = gather_inputs(from_node, amount + fee * 2) outputs = make_change(from_node, total_in, amount + fee, fee) outputs[self_address] = float(amount + fee) self_rawtx = from_node.createrawtransaction(inputs, outputs) self_signresult = from_node.signrawtransaction( self_rawtx, None, None, "ALL|FORKID") self_txid = from_node.sendrawtransaction(self_signresult["hex"], True) vout = find_output(from_node, self_txid, amount + fee) # Now immediately spend the output to create a 1-input, 1-output # zero-priority transaction: inputs = [{"txid": self_txid, "vout": vout}] outputs = {to_node.getnewaddress(): float(amount)} rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx, None, None, "ALL|FORKID") txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"]) def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random zero-priority transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment * random.randint(0, fee_variants) (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee) return (txid, txhex, fee) def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random transaction. 
Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment * random.randint(0, fee_variants) (total_in, inputs) = gather_inputs(from_node, amount + fee) outputs = make_change(from_node, total_in, amount, fee) outputs[to_node.getnewaddress()] = float(amount) rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx, None, None, "ALL|FORKID") txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"], fee) def assert_fee_amount(fee, tx_size, fee_per_kB): """Assert the fee was in range""" target_fee = tx_size * fee_per_kB / 1000 if fee < target_fee: raise AssertionError( "Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee))) # allow the wallet's estimation to be at most 2 bytes off if fee > (tx_size + 2) * fee_per_kB / 1000: raise AssertionError( "Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee))) def assert_equal(thing1, thing2, *args): if thing1 != thing2 or any(thing1 != arg for arg in args): raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args)) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s" % (str(thing1), str(thing2))) def assert_greater_than_or_equal(thing1, thing2): if thing1 < thing2: raise AssertionError("%s < %s" % (str(thing1), str(thing2))) def assert_raises(exc, fun, *args, **kwds): assert_raises_message(exc, None, fun, *args, **kwds) def assert_raises_message(exc, message, fun, *args, **kwds): try: fun(*args, **kwds) except exc as e: if message is not None and message not in e.error['message']: raise AssertionError( "Expected substring not found:" + e.error['message']) except Exception as e: raise AssertionError( "Unexpected exception raised: " + type(e).__name__) else: raise AssertionError("No exception raised") def assert_raises_jsonrpc(code, message, fun, *args, **kwds): """Run an RPC and verify that a specific JSONRPC exception code and message is raised. Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException and verifies that the error code and message are as expected. Throws AssertionError if no JSONRPCException was returned or if the error code/message are not as expected. Args: code (int), optional: the error code returned by the RPC call (defined in src/rpc/protocol.h). Set to None if checking the error code is not required. message (string), optional: [a substring of] the error string returned by the RPC call. Set to None if checking the error string is not required fun (function): the function to call. This should be the name of an RPC. args*: positional arguments for the function. kwds**: named arguments for the function. """ try: fun(*args, **kwds) except JSONRPCException as e: # JSONRPCException was thrown as expected. Check the code and message # values are correct. 
if (code is not None) and (code != e.error["code"]): raise AssertionError( "Unexpected JSONRPC error code %i" % e.error["code"]) if (message is not None) and (message not in e.error['message']): raise AssertionError( "Expected substring not found:" + e.error['message']) except Exception as e: raise AssertionError( "Unexpected exception raised: " + type(e).__name__) else: raise AssertionError("No exception raised") def assert_is_hex_string(string): try: int(string, 16) except Exception as e: raise AssertionError( "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e)) def assert_is_hash_string(string, length=64): if not isinstance(string, str): raise AssertionError("Expected a string, got type %r" % type(string)) elif length and len(string) != length: raise AssertionError( "String of length %d expected; got %d" % (length, len(string))) elif not re.match('[abcdef0-9]+$', string): raise AssertionError( "String %r contains invalid characters for a hash." % string) def assert_array_result(object_array, to_match, expected, should_not_find=False): """ Pass in array of JSON objects, a dictionary with key/value pairs to match against, and another dictionary with expected key/value pairs. If the should_not_find flag is true, to_match should not be found in object_array """ if should_not_find == True: assert_equal(expected, {}) num_matched = 0 for item in object_array: all_match = True for key, value in to_match.items(): if item[key] != value: all_match = False if not all_match: continue elif should_not_find == True: num_matched = num_matched + 1 for key, value in expected.items(): if item[key] != value: raise AssertionError( "%s : expected %s=%s" % (str(item), str(key), str(value))) num_matched = num_matched + 1 if num_matched == 0 and should_not_find != True: raise AssertionError("No objects matched %s" % (str(to_match))) if num_matched > 0 and should_not_find == True: raise AssertionError("Objects were found %s" % (str(to_match))) def satoshi_round(amount): return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) # Helper to create at least "count" utxos # Pass in a fee that is sufficient for relay and mining new transactions. def create_confirmed_utxos(fee, node, count): node.generate(int(0.5 * count) + 101) utxos = node.listunspent() iterations = count - len(utxos) addr1 = node.getnewaddress() addr2 = node.getnewaddress() if iterations <= 0: return utxos for i in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) outputs = {} send_value = t['amount'] - fee outputs[addr1] = satoshi_round(send_value / 2) outputs[addr2] = satoshi_round(send_value / 2) raw_tx = node.createrawtransaction(inputs, outputs) signed_tx = node.signrawtransaction( raw_tx, None, None, "ALL|FORKID")["hex"] txid = node.sendrawtransaction(signed_tx) while (node.getmempoolinfo()['size'] > 0): node.generate(1) utxos = node.listunspent() assert(len(utxos) >= count) return utxos # Create large OP_RETURN txouts that can be appended to a transaction # to make it large (helper for constructing large transactions). 
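# Editor's aside (a sketch, not part of the original suite): the arithmetic
# behind the OP_RETURN padding built by gen_return_txouts() below. The script
# is OP_RETURN (0x6a) OP_PUSHDATA2 (0x4d) followed by a 512-byte payload, so
# each spliced txout is 8 value bytes + a 3-byte compactsize (fd 04 02) + 516
# script bytes; 128 of them give the roughly 66 kB of padding per transaction
# that mine_large_block() relies on.
script_len = 1 + 1 + 2 + 512      # opcode + OP_PUSHDATA2 + length word + payload
assert script_len == 0x0204       # matches the "fd0402" compactsize below
txout_len = 8 + 3 + script_len    # value + compactsize + script
assert 128 * txout_len == 67456   # about 66 kB of padding per transaction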
def gen_return_txouts():
    # Some pre-processing to create a bunch of OP_RETURN txouts to insert
    # into transactions we create, so we have big transactions (and therefore
    # can't fit very many into each block).
    # Create one script_pubkey
    script_pubkey = "6a4d0200"  # OP_RETURN OP_PUSH2 512 bytes
    for i in range(512):
        script_pubkey = script_pubkey + "01"
    # Concatenate 128 txouts of the above script_pubkey, which we'll insert
    # before the txout for change
    txouts = "81"
    for k in range(128):
        # add txout value
        txouts = txouts + "0000000000000000"
        # add length of script_pubkey
        txouts = txouts + "fd0402"
        # add script_pubkey
        txouts = txouts + script_pubkey
    return txouts


def create_tx(node, coinbase, to_address, amount):
    inputs = [{"txid": coinbase, "vout": 0}]
    outputs = {to_address: amount}
    rawtx = node.createrawtransaction(inputs, outputs)
    signresult = node.signrawtransaction(rawtx, None, None, "ALL|FORKID")
    assert_equal(signresult["complete"], True)
    return signresult["hex"]


# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = satoshi_round(change)
        rawtx = node.createrawtransaction(inputs, outputs)
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        signresult = node.signrawtransaction(newtx, None, None, "NONE|FORKID")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids


def mine_large_block(node, utxos=None):
    # generate a 66k transaction; 14 of them are close to the 1MB block limit
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)


def get_bip9_status(node, key):
    info = node.getblockchaininfo()
    return info['bip9_softforks'][key]


def get_srcdir(calling_script=None):
    """
    Try to find out the base folder containing the 'src' folder.
    If SRCDIR is set it does a sanity check and returns that.
    Otherwise it goes on a search and rescue mission.
    Returns None if it cannot find a suitable folder.
    """
    def contains_src(path_to_check):
        if not path_to_check:
            return False
        else:
            cand_path = os.path.join(path_to_check, 'src')
            return os.path.exists(cand_path) and os.path.isdir(cand_path)

    srcdir = os.environ.get('SRCDIR', '')
    if contains_src(srcdir):
        return srcdir

    # If we have a caller, try to guess from its location where the
    # top level might be.
    if calling_script:
        caller_basedir = os.path.dirname(
            os.path.dirname(os.path.dirname(calling_script)))
        if caller_basedir != '' and contains_src(os.path.abspath(caller_basedir)):
            return os.path.abspath(caller_basedir)

    # Try to work it out based on the main module. We might expect the
    # caller to be rpc-tests.py or a test script itself.
    mainmod = sys.modules['__main__']
    mainmod_path = getattr(mainmod, '__file__', '')
    if mainmod_path and mainmod_path.endswith('.py'):
        maybe_top = os.path.dirname(
            os.path.dirname(os.path.dirname(mainmod_path)))
        if contains_src(os.path.abspath(maybe_top)):
            return os.path.abspath(maybe_top)

    # No luck, give up.
return None diff --git a/qa/rpc-tests/txn_clone.py b/qa/rpc-tests/txn_clone.py index dd6a1ed68..89739b3f8 100755 --- a/qa/rpc-tests/txn_clone.py +++ b/qa/rpc-tests/txn_clone.py @@ -1,170 +1,172 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test proper accounting with an equivalent malleability clone # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class TxnMallTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 4 self.setup_clean_chain = False def add_options(self, parser): parser.add_option( "--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") def setup_network(self): # Start with split network: - return super(TxnMallTest, self).setup_network(True) + super(TxnMallTest, self).setup_network() + disconnect_nodes(self.nodes[1], 2) + disconnect_nodes(self.nodes[2], 1) def run_test(self): # All nodes should start with 1,250 BTC: starting_balance = 1250 for i in range(4): assert_equal(self.nodes[i].getbalance(), starting_balance) # bug workaround, coins generated assigned to first getnewaddress! self.nodes[i].getnewaddress("") # Assign coins to foo and bar accounts: self.nodes[0].settxfee(.001) node0_address_foo = self.nodes[0].getnewaddress("foo") fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) node0_address_bar = self.nodes[0].getnewaddress("bar") fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) assert_equal(self.nodes[0].getbalance(""), starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress("from0") # Send tx1, and another transaction tx2 that won't be cloned txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) # Construct a clone of tx1, to be malleated rawtx1 = self.nodes[0].getrawtransaction(txid1, 1) clone_inputs = [ {"txid": rawtx1["vin"][0]["txid"], "vout":rawtx1["vin"][0]["vout"]}] clone_outputs = {rawtx1[ "vout"][0]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][0]["value"], rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]: rawtx1["vout"][1]["value"]} clone_locktime = rawtx1["locktime"] clone_raw = self.nodes[0].createrawtransaction( clone_inputs, clone_outputs, clone_locktime) # createrawtransaction randomizes the order of its outputs, so swap them if necessary. # output 0 is at version+#inputs+input+sigstub+sequence+#outputs # 40 BTC serialized is 00286bee00000000 pos0 = 2 * (4 + 1 + 36 + 1 + 4 + 1) hex40 = "00286bee00000000" output_len = 16 + 2 + 2 * \ int("0x" + clone_raw[pos0 + 16: pos0 + 16 + 2], 0) if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0: pos0 + 16] != hex40 or rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0: pos0 + 16] == hex40): output0 = clone_raw[pos0: pos0 + output_len] output1 = clone_raw[pos0 + output_len: pos0 + 2 * output_len] clone_raw = clone_raw[:pos0] + output1 + \ output0 + clone_raw[pos0 + 2 * output_len:] # Use a different signature hash type to sign. This creates an equivalent but malleated clone. 
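# Editor's aside, a sketch verifying the offsets used above. It assumes the
# classic pre-segwit serialization: 4-byte version, 1-byte input count,
# 36-byte outpoint, 1-byte empty-scriptSig stub, 4-byte sequence, 1-byte
# output count; offsets are doubled because clone_raw is a hex string.
import struct

pos0 = 2 * (4 + 1 + 36 + 1 + 4 + 1)
assert pos0 == 94
# 40 BTC in satoshis, serialized as a little-endian 64-bit value:
assert struct.pack("<q", 40 * 100000000).hex() == "00286bee00000000"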
# Don't send the clone anywhere yet tx1_clone = self.nodes[0].signrawtransaction( clone_raw, None, None, "ALL|FORKID|ANYONECANPAY") assert_equal(tx1_clone["complete"], True) # Have node0 mine a block, if requested: if (self.options.mine_block): self.nodes[0].generate(1) sync_blocks(self.nodes[0:2]) tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Node0's balance should be starting balance, plus 50BTC for another # matured block, minus tx1 and tx2 amounts, and minus transaction fees: expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] if self.options.mine_block: expected += 50 expected += tx1["amount"] + tx1["fee"] expected += tx2["amount"] + tx2["fee"] assert_equal(self.nodes[0].getbalance(), expected) # foo and bar accounts should be debited: assert_equal(self.nodes[0].getbalance( "foo", 0), 1219 + tx1["amount"] + tx1["fee"]) assert_equal(self.nodes[0].getbalance( "bar", 0), 29 + tx2["amount"] + tx2["fee"]) if self.options.mine_block: assert_equal(tx1["confirmations"], 1) assert_equal(tx2["confirmations"], 1) # Node1's "from0" balance should be both transaction amounts: assert_equal(self.nodes[1].getbalance( "from0"), -(tx1["amount"] + tx2["amount"])) else: assert_equal(tx1["confirmations"], 0) assert_equal(tx2["confirmations"], 0) # Send clone and its parent to miner self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"]) # ... mine a block... self.nodes[2].generate(1) # Reconnect the split network, and sync chain: connect_nodes(self.nodes[1], 2) self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) self.nodes[2].sendrawtransaction(tx2["hex"]) self.nodes[2].generate(1) # Mine another block to make sure we sync sync_blocks(self.nodes) # Re-fetch transaction info: tx1 = self.nodes[0].gettransaction(txid1) tx1_clone = self.nodes[0].gettransaction(txid1_clone) tx2 = self.nodes[0].gettransaction(txid2) # Verify expected confirmations assert_equal(tx1["confirmations"], -2) assert_equal(tx1_clone["confirmations"], 2) assert_equal(tx2["confirmations"], 1) # Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured, # less possible orphaned matured subsidy expected += 100 if (self.options.mine_block): expected -= 50 assert_equal(self.nodes[0].getbalance(), expected) assert_equal(self.nodes[0].getbalance("*", 0), expected) # Check node0's individual account balances. # "foo" should have been debited by the equivalent clone of tx1 assert_equal(self.nodes[0].getbalance( "foo"), 1219 + tx1["amount"] + tx1["fee"]) # "bar" should have been debited by (possibly unconfirmed) tx2 assert_equal(self.nodes[0].getbalance( "bar", 0), 29 + tx2["amount"] + tx2["fee"]) # "" should have starting balance, less funding txes, plus subsidies assert_equal(self.nodes[0].getbalance("", 0), starting_balance - 1219 + fund_foo_tx["fee"] - 29 + fund_bar_tx["fee"] + 100) # Node1's "from0" account balance assert_equal(self.nodes[1].getbalance( "from0", 0), -(tx1["amount"] + tx2["amount"])) if __name__ == '__main__': TxnMallTest().main() diff --git a/qa/rpc-tests/txn_doublespend.py b/qa/rpc-tests/txn_doublespend.py index 95fdd4a01..8ba1f7a12 100755 --- a/qa/rpc-tests/txn_doublespend.py +++ b/qa/rpc-tests/txn_doublespend.py @@ -1,151 +1,153 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
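# Editor's note (an aside on the pattern this diff introduces): both txn_*
# tests used to request a split network via setup_network(True); they now
# start fully connected and sever the 1<->2 link explicitly with the new
# disconnect_nodes() helper from util.py, rejoining later with
# connect_nodes(). A hedged sketch of the idiom, assuming the four-node
# layout these tests use:
#
#     disconnect_nodes(self.nodes[1], 2)   # partition: (0,1) | (2,3)
#     disconnect_nodes(self.nodes[2], 1)
#     ...do partitioned work on each half...
#     connect_nodes(self.nodes[1], 2)      # heal the partition
#     sync_blocks(self.nodes)              # wait until both halves agree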
# # Test proper accounting with a double-spend conflict # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * class TxnMallTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 4 self.setup_clean_chain = False def add_options(self, parser): parser.add_option( "--mineblock", dest="mine_block", default=False, action="store_true", help="Test double-spend of 1-confirmed transaction") def setup_network(self): # Start with split network: - return super(TxnMallTest, self).setup_network(True) + super().setup_network() + disconnect_nodes(self.nodes[1], 2) + disconnect_nodes(self.nodes[2], 1) def run_test(self): # All nodes should start with 1,250 BTC: starting_balance = 1250 for i in range(4): assert_equal(self.nodes[i].getbalance(), starting_balance) # bug workaround, coins generated assigned to first getnewaddress! self.nodes[i].getnewaddress("") # Assign coins to foo and bar accounts: node0_address_foo = self.nodes[0].getnewaddress("foo") fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219) fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid) node0_address_bar = self.nodes[0].getnewaddress("bar") fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29) fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid) assert_equal(self.nodes[0].getbalance(""), starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"]) # Coins are sent to node1_address node1_address = self.nodes[1].getnewaddress("from0") # First: use raw transaction API to send 1240 BTC to node1_address, # but don't broadcast: doublespend_fee = Decimal('-.02') rawtx_input_0 = {} rawtx_input_0["txid"] = fund_foo_txid rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219) rawtx_input_1 = {} rawtx_input_1["txid"] = fund_bar_txid rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29) inputs = [rawtx_input_0, rawtx_input_1] change_address = self.nodes[0].getnewaddress() outputs = {} outputs[node1_address] = 1240 outputs[change_address] = 1248 - 1240 + doublespend_fee rawtx = self.nodes[0].createrawtransaction(inputs, outputs) doublespend = self.nodes[0].signrawtransaction( rawtx, None, None, "ALL|FORKID") assert_equal(doublespend["complete"], True) # Create two spends using 1 50 BTC coin each txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0) txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0) # Have node0 mine a block: if (self.options.mine_block): self.nodes[0].generate(1) sync_blocks(self.nodes[0:2]) tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Node0's balance should be starting balance, plus 50BTC for another # matured block, minus 40, minus 20, and minus transaction fees: expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] if self.options.mine_block: expected += 50 expected += tx1["amount"] + tx1["fee"] expected += tx2["amount"] + tx2["fee"] assert_equal(self.nodes[0].getbalance(), expected) # foo and bar accounts should be debited: assert_equal(self.nodes[0].getbalance( "foo", 0), 1219 + tx1["amount"] + tx1["fee"]) assert_equal(self.nodes[0].getbalance( "bar", 0), 29 + tx2["amount"] + tx2["fee"]) if self.options.mine_block: assert_equal(tx1["confirmations"], 1) assert_equal(tx2["confirmations"], 1) # Node1's "from0" balance should be both transaction amounts: assert_equal(self.nodes[1].getbalance( "from0"), -(tx1["amount"] + tx2["amount"])) else: assert_equal(tx1["confirmations"], 0) 
assert_equal(tx2["confirmations"], 0) # Now give doublespend and its parents to miner: self.nodes[2].sendrawtransaction(fund_foo_tx["hex"]) self.nodes[2].sendrawtransaction(fund_bar_tx["hex"]) doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"]) # ... mine a block... self.nodes[2].generate(1) # Reconnect the split network, and sync chain: connect_nodes(self.nodes[1], 2) # Mine another block to make sure we sync self.nodes[2].generate(1) sync_blocks(self.nodes) assert_equal(self.nodes[0].gettransaction( doublespend_txid)["confirmations"], 2) # Re-fetch transaction info: tx1 = self.nodes[0].gettransaction(txid1) tx2 = self.nodes[0].gettransaction(txid2) # Both transactions should be conflicted assert_equal(tx1["confirmations"], -2) assert_equal(tx2["confirmations"], -2) # Node0's total balance should be starting balance, plus 100BTC for # two more matured blocks, minus 1240 for the double-spend, plus fees (which are # negative): expected = starting_balance + 100 - 1240 + \ fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee assert_equal(self.nodes[0].getbalance(), expected) assert_equal(self.nodes[0].getbalance("*"), expected) # Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies + # fees (which are negative) assert_equal(self.nodes[0].getbalance("foo"), 1219) assert_equal(self.nodes[0].getbalance("bar"), 29) assert_equal(self.nodes[0].getbalance(""), starting_balance - 1219 - 29 - 1240 + 100 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee) # Node1's "from0" account balance should be just the doublespend: assert_equal(self.nodes[1].getbalance("from0"), 1240) if __name__ == '__main__': TxnMallTest().main() diff --git a/qa/rpc-tests/wallet-accounts.py b/qa/rpc-tests/wallet-accounts.py index d1dbfb808..7eb6f9b9b 100755 --- a/qa/rpc-tests/wallet-accounts.py +++ b/qa/rpc-tests/wallet-accounts.py @@ -1,95 +1,90 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( start_nodes, start_node, assert_equal, connect_nodes_bi, ) class WalletAccountsTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 1 - self.node_args = [[]] - - def setup_network(self): - self.nodes = start_nodes( - self.num_nodes, self.options.tmpdir, self.node_args) - self.is_network_split = False + self.extra_args = [[]] def run_test(self): node = self.nodes[0] # Check that there's no UTXO on any of the nodes assert_equal(len(node.listunspent()), 0) node.generate(101) assert_equal(node.getbalance(), 50) accounts = ["a", "b", "c", "d", "e"] amount_to_send = 1.0 account_addresses = dict() for account in accounts: address = node.getaccountaddress(account) account_addresses[account] = address node.getnewaddress(account) assert_equal(node.getaccount(address), account) assert(address in node.getaddressesbyaccount(account)) node.sendfrom("", address, amount_to_send) node.generate(1) for i in range(len(accounts)): from_account = accounts[i] to_account = accounts[(i + 1) % len(accounts)] to_address = account_addresses[to_account] node.sendfrom(from_account, to_address, amount_to_send) node.generate(1) for account in accounts: address = node.getaccountaddress(account) assert(address != account_addresses[account]) assert_equal(node.getreceivedbyaccount(account), 2) node.move(account, "", node.getbalance(account)) node.generate(101) expected_account_balances = {"": 5200} for account in accounts: expected_account_balances[account] = 0 assert_equal(node.listaccounts(), expected_account_balances) assert_equal(node.getbalance(""), 5200) for account in accounts: address = node.getaccountaddress("") node.setaccount(address, account) assert(address in node.getaddressesbyaccount(account)) assert(address not in node.getaddressesbyaccount("")) for account in accounts: addresses = [] for x in range(10): addresses.append(node.getnewaddress()) multisig_address = node.addmultisigaddress(5, addresses, account) node.sendfrom("", multisig_address, 50) node.generate(101) for account in accounts: assert_equal(node.getbalance(account), 50) if __name__ == '__main__': WalletAccountsTest().main() diff --git a/qa/rpc-tests/wallet-hd.py b/qa/rpc-tests/wallet-hd.py index 474bc2fb0..1d35a5169 100755 --- a/qa/rpc-tests/wallet-hd.py +++ b/qa/rpc-tests/wallet-hd.py @@ -1,90 +1,84 @@ #!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
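# Editor's aside (a sketch of the keypath convention wallet-hd.py below
# asserts): validateaddress reports a hardened BIP32 path of the form
# m/0'/0'/i' for the i-th key derived from the HD master key.
import re

def looks_like_expected_hd_path(path, index):
    # purely illustrative helper, not part of the test framework
    return re.fullmatch(r"m/0'/0'/%d'" % index, path) is not None

assert looks_like_expected_hd_path("m/0'/0'/1'", 1)
assert not looks_like_expected_hd_path("m/0'/0'/2'", 1)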
from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( start_nodes, start_node, assert_equal, connect_nodes_bi, ) import os import shutil class WalletHDTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 2 - self.node_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']] - - def setup_network(self): - self.nodes = start_nodes( - self.num_nodes, self.options.tmpdir, self.node_args) - self.is_network_split = False - connect_nodes_bi(self.nodes, 0, 1) + self.extra_args = [['-usehd=0'], ['-usehd=1', '-keypool=0']] def run_test(self): tmpdir = self.options.tmpdir # Make sure we use hd, keep masterkeyid masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid'] assert_equal(len(masterkeyid), 40) # Import a non-HD private key in the HD wallet non_hd_add = self.nodes[0].getnewaddress() self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add)) # This should be enough to keep the master key and the non-HD key self.nodes[1].backupwallet(tmpdir + "/hd.bak") # self.nodes[1].dumpwallet(tmpdir + "/hd.dump") # Derive some HD addresses and remember the last # Also send funds to each add self.nodes[0].generate(101) hd_add = None num_hd_adds = 300 for i in range(num_hd_adds): hd_add = self.nodes[1].getnewaddress() hd_info = self.nodes[1].validateaddress(hd_add) assert_equal(hd_info["hdkeypath"], "m/0'/0'/" + str(i + 1) + "'") assert_equal(hd_info["hdmasterkeyid"], masterkeyid) self.nodes[0].sendtoaddress(hd_add, 1) self.nodes[0].generate(1) self.nodes[0].sendtoaddress(non_hd_add, 1) self.nodes[0].generate(1) self.sync_all() assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) self.log.info("Restore backup ...") self.stop_node(1) os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat") shutil.copyfile( tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat") - self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1]) + self.nodes[1] = start_node(1, self.options.tmpdir, self.extra_args[1]) # connect_nodes_bi(self.nodes, 0, 1) # Assert that derivation is deterministic hd_add_2 = None for _ in range(num_hd_adds): hd_add_2 = self.nodes[1].getnewaddress() hd_info_2 = self.nodes[1].validateaddress(hd_add_2) assert_equal(hd_info_2["hdkeypath"], "m/0'/0'/" + str(_ + 1) + "'") assert_equal(hd_info_2["hdmasterkeyid"], masterkeyid) assert_equal(hd_add, hd_add_2) # Needs rescan self.stop_node(1) self.nodes[1] = start_node( - 1, self.options.tmpdir, self.node_args[1] + ['-rescan']) + 1, self.options.tmpdir, self.extra_args[1] + ['-rescan']) # connect_nodes_bi(self.nodes, 0, 1) assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1) if __name__ == '__main__': WalletHDTest().main() diff --git a/qa/rpc-tests/wallet.py b/qa/rpc-tests/wallet.py index b384dae1b..01684871b 100755 --- a/qa/rpc-tests/wallet.py +++ b/qa/rpc-tests/wallet.py @@ -1,442 +1,441 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
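# Editor's aside (a sketch of the fee window that check_fee_amount /
# assert_fee_amount enforce in wallet.py below): the paid fee must be at
# least tx_size * rate and at most (tx_size + 2) * rate, the two extra bytes
# allowing the wallet's size estimate to be slightly off.
from decimal import Decimal

def fee_in_range(fee, tx_size, fee_per_kB):
    # illustrative only; the real check raises AssertionError instead
    return tx_size * fee_per_kB / 1000 <= fee <= (tx_size + 2) * fee_per_kB / 1000

assert fee_in_range(Decimal("0.000226"), 226, Decimal("0.001"))
assert not fee_in_range(Decimal("0.000100"), 226, Decimal("0.001"))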
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *


class WalletTest (BitcoinTestFramework):

    def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
        """Return curr_balance after asserting the fee was in range"""
        fee = balance_with_fee - curr_balance
        assert_fee_amount(fee, tx_size, fee_per_byte * 1000)
        return curr_balance

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
        self.extra_args = [['-usehd={:d}'.format(i % 2 == 0)]
                           for i in range(4)]

-    def setup_network(self, split=False):
+    def setup_network(self):
        self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3])
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
-        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # Check that there's no UTXO on any of the nodes
        assert_equal(len(self.nodes[0].listunspent()), 0)
        assert_equal(len(self.nodes[1].listunspent()), 0)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 50)
        assert_equal(walletinfo['balance'], 0)

        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 50)
        assert_equal(self.nodes[1].getbalance(), 50)
        assert_equal(self.nodes[2].getbalance(), 0)

        # Check that only first and second nodes have UTXOs
        assert_equal(len(self.nodes[0].listunspent()), 1)
        assert_equal(len(self.nodes[1].listunspent()), 1)
        assert_equal(len(self.nodes[2].listunspent()), 0)

        # Send 21 BTC from 0 to 2 using sendtoaddress call.
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)

        walletinfo = self.nodes[0].getwalletinfo()
        assert_equal(walletinfo['immature_balance'], 0)

        # Have node0 mine a block, thus it will collect its own fee.
        self.nodes[0].generate(1)
        self.sync_all()

        # Exercise locking of unspent outputs
        unspent_0 = self.nodes[2].listunspent()[0]
        unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
        self.nodes[2].lockunspent(False, [unspent_0])
        assert_raises_message(JSONRPCException, "Insufficient funds",
                              self.nodes[2].sendtoaddress,
                              self.nodes[2].getnewaddress(), 20)
        assert_equal([unspent_0], self.nodes[2].listlockunspent())
        self.nodes[2].lockunspent(True, [unspent_0])
        assert_equal(len(self.nodes[2].listlockunspent()), 0)

        # Have node1 generate 100 blocks (so node0 can recover the fee)
        self.nodes[1].generate(100)
        self.sync_all()

        # node0 should end up with 100 btc in block rewards plus fees, but
        # minus the 21 plus fees sent to node2
        assert_equal(self.nodes[0].getbalance(), 100 - 21)
        assert_equal(self.nodes[2].getbalance(), 21)

        # Node0 should have two unspent outputs.
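        # Editor's arithmetic aside for the balances asserted below (a
        # sketch; fees are zero here and node0 mined its own sends, so the
        # totals are exact): node0 holds 100 - 21 = 79 BTC across its two
        # outputs, each respend below pays a 3 BTC fee, so node2 ends up
        # with 21 + 79 - 2 * 3 = 94 BTC while node0 goes to zero.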
# Create a couple of transactions to send them to node2, submit them through # node1, and make sure both node0 and node2 pick them up properly: node0utxos = self.nodes[0].listunspent(1) assert_equal(len(node0utxos), 2) # create both transactions txns_to_send = [] for utxo in node0utxos: inputs = [] outputs = {} inputs.append({"txid": utxo["txid"], "vout": utxo["vout"]}) outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3 raw_tx = self.nodes[0].createrawtransaction(inputs, outputs) txns_to_send.append( self.nodes[0].signrawtransaction(raw_tx, None, None, "ALL|FORKID")) # Have node 1 (miner) send the transactions self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True) self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True) # Have node1 mine a block to confirm transactions: self.nodes[1].generate(1) self.sync_all() assert_equal(self.nodes[0].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 94) assert_equal(self.nodes[2].getbalance("from1"), 94 - 21) # Send 10 BTC normal address = self.nodes[0].getnewaddress("test") fee_per_byte = Decimal('0.001') / 1000 self.nodes[2].settxfee(fee_per_byte * 1000) txid = self.nodes[2].sendtoaddress(address, 10, "", "", False) self.nodes[2].generate(1) self.sync_all() node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal( '84'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) assert_equal(self.nodes[0].getbalance(), Decimal('10')) # Send 10 BTC with subtract fee from amount txid = self.nodes[2].sendtoaddress(address, 10, "", "", True) self.nodes[2].generate(1) self.sync_all() node_2_bal -= Decimal('10') assert_equal(self.nodes[2].getbalance(), node_2_bal) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal( '20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) # Sendmany 10 BTC txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", []) self.nodes[2].generate(1) self.sync_all() node_0_bal += Decimal('10') node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal( '10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) assert_equal(self.nodes[0].getbalance(), node_0_bal) # Sendmany 10 BTC with subtract fee from amount txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address]) self.nodes[2].generate(1) self.sync_all() node_2_bal -= Decimal('10') assert_equal(self.nodes[2].getbalance(), node_2_bal) node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal( '10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid))) # Test ResendWalletTransactions: # Create a couple of transactions, then start up a fourth # node (nodes[3]) and ask nodes[0] to rebroadcast. # EXPECT: nodes[3] should have those transactions in its mempool. txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1) sync_mempools(self.nodes) self.nodes.append( start_node(3, self.options.tmpdir, self.extra_args[3])) connect_nodes_bi(self.nodes, 0, 3) sync_blocks(self.nodes) relayed = self.nodes[0].resendwallettransactions() assert_equal(set(relayed), {txid1, txid2}) sync_mempools(self.nodes) assert(txid1 in self.nodes[3].getrawmempool()) # Exercise balance rpcs assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1) assert_equal(self.nodes[0].getunconfirmedbalance(), 1) # check if we can list zero value tx as available coins # 1. create rawtx # 2. hex-changed one output to 0.0 # 3. sign and send # 4. 
check if recipient (node0) can list the zero value tx
        usp = self.nodes[1].listunspent()
        inputs = [{"txid": usp[0]['txid'], "vout": usp[0]['vout']}]
        outputs = {self.nodes[1].getnewaddress(): 49.998,
                   self.nodes[0].getnewaddress(): 11.11}
        # replace 11.11 with 0.0 (int32)
        rawTx = self.nodes[1].createrawtransaction(
            inputs, outputs).replace("c0833842", "00000000")
        decRawTx = self.nodes[1].decoderawtransaction(rawTx)
        signedRawTx = self.nodes[1].signrawtransaction(
            rawTx, None, None, "ALL|FORKID")
        decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
        zeroValueTxid = decRawTx['txid']
        sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])

        self.sync_all()
        self.nodes[1].generate(1)  # mine a block
        self.sync_all()

        # the zero value tx must be in listunspent's output
        unspentTxs = self.nodes[0].listunspent()
        found = False
        for uTx in unspentTxs:
            if uTx['txid'] == zeroValueTxid:
                found = True
                assert_equal(uTx['amount'], Decimal('0'))
        assert(found)

        # do some -walletbroadcast tests
        stop_nodes(self.nodes)
        extra_args = [["-walletbroadcast=0"] for i in range(3)]
        self.nodes = start_nodes(3, self.options.tmpdir, extra_args)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        self.sync_all()

        txIdNotBroadcasted = self.nodes[0].sendtoaddress(
            self.nodes[2].getnewaddress(), 2)
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        # mine a block, tx should not be in there
        self.nodes[1].generate(1)
        self.sync_all()
        # balance should not change because the tx was not broadcast
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        # now broadcast from another node, mine a block, sync, and check the
        # balance
        self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        node_2_bal += 2
        txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        # create another tx
        txIdNotBroadcasted = self.nodes[0].sendtoaddress(
            self.nodes[2].getnewaddress(), 2)

        # restart the nodes with -walletbroadcast=1
        stop_nodes(self.nodes)
        self.nodes = start_nodes(3, self.options.tmpdir)
        connect_nodes_bi(self.nodes, 0, 1)
        connect_nodes_bi(self.nodes, 1, 2)
        connect_nodes_bi(self.nodes, 0, 2)
        sync_blocks(self.nodes)

        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        node_2_bal += 2

        # the tx should be added to the balance because after restarting the
        # nodes it should be broadcast
        assert_equal(self.nodes[2].getbalance(), node_2_bal)

        # send a tx with value in a string (PR#6380 +)
        txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-2'))

        txId = self.nodes[0].sendtoaddress(
            self.nodes[2].getnewaddress(), "0.0001")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        # check if JSON parser can handle scientific notation in strings
        txId = self.nodes[0].sendtoaddress(
            self.nodes[2].getnewaddress(), "1e-4")
        txObj = self.nodes[0].gettransaction(txId)
        assert_equal(txObj['amount'], Decimal('-0.0001'))

        try:
            txId = self.nodes[0].sendtoaddress(
                self.nodes[2].getnewaddress(), "1f-4")
        except JSONRPCException as e:
            assert("Invalid amount" in e.error['message'])
        else:
            raise AssertionError("Must not parse invalid amounts")

        try:
            self.nodes[0].generate("2")
            raise AssertionError("Must not accept strings as numeric")
        except JSONRPCException as e:
            assert("not an integer" in e.error['message'])

        # Import address and private key to check correct behavior of spendable
unspents
        # 1. Send some coins to generate new UTXO
        address_to_import = self.nodes[2].getnewaddress()
        txid = self.nodes[0].sendtoaddress(address_to_import, 1)
        self.nodes[0].generate(1)
        self.sync_all()

        # 2. Import address from node2 to node1
        self.nodes[1].importaddress(address_to_import)

        # 3. Validate that the imported address is watch-only on node1
        assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])

        # 4. Check that the unspents after import are not spendable
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": False})

        # 5. Import private key of the previously imported address on node1
        priv_key = self.nodes[2].dumpprivkey(address_to_import)
        self.nodes[1].importprivkey(priv_key)

        # 6. Check that the unspents are now spendable on node1
        assert_array_result(self.nodes[1].listunspent(),
                            {"address": address_to_import},
                            {"spendable": True})

        # Mine a block from node0 to an address from node1
        cbAddr = self.nodes[1].getnewaddress()
        blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
        cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
        self.sync_all()

        # Check that the txid and balance are found by node1
        self.nodes[1].gettransaction(cbTxId)

        # check if wallet or blockchain maintenance changes the balance
        self.sync_all()
        blocks = self.nodes[0].generate(2)
        self.sync_all()
        balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
        block_count = self.nodes[0].getblockcount()

        # Check modes:
        #   - True:  unicode escaped as \u....
        #   - False: unicode directly as UTF-8
        for mode in [True, False]:
            self.nodes[0].ensure_ascii = mode
            # unicode check: Basic Multilingual Plane, Supplementary Plane
            # respectively
            for s in [u'рыба', u'𝅘𝅥𝅯']:
                addr = self.nodes[0].getaccountaddress(s)
                label = self.nodes[0].getaccount(addr)
                assert_equal(label, s)
                assert(s in self.nodes[0].listaccounts().keys())
        self.nodes[0].ensure_ascii = True  # restore to default

        # maintenance tests
        maintenance = [
            '-rescan',
            '-reindex',
            '-zapwallettxes=1',
            '-zapwallettxes=2',
            # disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
            # '-salvagewallet',
        ]
        chainlimit = 6
        for m in maintenance:
            self.log.info("check " + m)
            stop_nodes(self.nodes)
            # set lower ancestor limit for later
            self.nodes = start_nodes(3, self.options.tmpdir,
                                     [[m, "-limitancestorcount=" + str(chainlimit)]] * 3)
            while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
                # reindex will leave rpc warm up "early"; wait for it to finish
                time.sleep(0.1)
            assert_equal(balance_nodes,
                         [self.nodes[i].getbalance() for i in range(3)])

        # Exercise listsinceblock with the last two blocks
        coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
        assert_equal(coinbase_tx_1["lastblock"], blocks[1])
        assert_equal(len(coinbase_tx_1["transactions"]), 1)
        assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
        assert_equal(
            len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)

        # ==Check that wallet prefers to use coins that don't exceed mempool limits==

        # Get all non-zero utxos together
        chain_addrs = [self.nodes[0].getnewaddress(),
                       self.nodes[0].getnewaddress()]
        singletxid = self.nodes[0].sendtoaddress(
            chain_addrs[0], self.nodes[0].getbalance(), "", "", True)
        self.nodes[0].generate(1)
        node0_balance = self.nodes[0].getbalance()
        # Split into two chains
        rawtx = self.nodes[0].createrawtransaction(
            [{"txid": singletxid, "vout": 0}],
            {chain_addrs[0]: node0_balance / 2 - Decimal('0.01'),
             chain_addrs[1]: node0_balance / 2 - Decimal('0.01')})
        signedtx =
self.nodes[0].signrawtransaction( rawtx, None, None, "ALL|FORKID") singletxid = self.nodes[0].sendrawtransaction(signedtx["hex"]) self.nodes[0].generate(1) # Make a long chain of unconfirmed payments without hitting mempool limit # Each tx we make leaves only one output of change on a chain 1 longer # Since the amount to send is always much less than the outputs, we only ever need one output # So we should be able to generate exactly chainlimit txs for each # original output sending_addr = self.nodes[1].getnewaddress() txid_list = [] for i in range(chainlimit * 2): txid_list.append( self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))) assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2) assert_equal(len(txid_list), chainlimit * 2) # Without walletrejectlongchains, we will still generate a txid # The tx will be stored in the wallet but not accepted to the mempool extra_txid = self.nodes[0].sendtoaddress( sending_addr, Decimal('0.0001')) assert(extra_txid not in self.nodes[0].getrawmempool()) assert(extra_txid in [tx["txid"] for tx in self.nodes[0].listtransactions()]) self.nodes[0].abandontransaction(extra_txid) total_txs = len(self.nodes[0].listtransactions("*", 99999)) # Try with walletrejectlongchains # Double chain limit but require combining inputs, so we pass # SelectCoinsMinConf stop_node(self.nodes[0], 0) self.nodes[0] = start_node(0, self.options.tmpdir, [ "-walletrejectlongchains", "-limitancestorcount=" + str(2 * chainlimit)]) # wait for loadmempool timeout = 10 while (timeout > 0 and len(self.nodes[0].getrawmempool()) < chainlimit * 2): time.sleep(0.5) timeout -= 0.5 assert_equal(len(self.nodes[0].getrawmempool()), chainlimit * 2) node0_balance = self.nodes[0].getbalance() # With walletrejectlongchains we will not create the tx and store it in # our wallet. assert_raises_message(JSONRPCException, "mempool chain", self.nodes[ 0].sendtoaddress, sending_addr, node0_balance - Decimal('0.01')) # Verify nothing new in wallet assert_equal( total_txs, len(self.nodes[0].listtransactions("*", 99999))) if __name__ == '__main__': WalletTest().main() diff --git a/qa/rpc-tests/walletbackup.py b/qa/rpc-tests/walletbackup.py index 8ebc4f4c1..72d156cca 100755 --- a/qa/rpc-tests/walletbackup.py +++ b/qa/rpc-tests/walletbackup.py @@ -1,206 +1,204 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Exercise the wallet backup code. Ported from walletbackup.sh. Test case is: 4 nodes. 1 2 and 3 send transactions between each other, fourth node is a miner. 1 2 3 each mine a block to start, then Miner creates 100 blocks so 1 2 3 each have 50 mature coins to spend. Then 5 iterations of 1/2/3 sending coins amongst themselves to get transactions in the wallets, and the miner mining one block. Wallets are backed up using dumpwallet/backupwallet. Then 5 more iterations of transactions and mining a block. Miner then generates 101 more blocks, so any transaction fees paid mature. Sanity check: Sum(1,2,3,4 balances) == 114*50 1/2/3 are shutdown, and their wallets erased. Then restore using wallet.dat backup. And confirm 1/2/3/4 balances are same as before. Shutdown again, restore using importwallet, and confirm again balances are correct. 
""" from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from random import randint class WalletBackupTest(BitcoinTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 4 # nodes 1, 2,3 are spenders, let's give them a keypool=100 self.extra_args = [ ["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []] # This mirrors how the network was setup in the bash test - def setup_network(self, split=False): - self.nodes = start_nodes( - self.num_nodes, self.options.tmpdir, self.extra_args) + def setup_network(self): + self.setup_nodes() connect_nodes(self.nodes[0], 3) connect_nodes(self.nodes[1], 3) connect_nodes(self.nodes[2], 3) connect_nodes(self.nodes[2], 0) - self.is_network_split = False self.sync_all() def one_send(self, from_node, to_address): if (randint(1, 2) == 1): amount = Decimal(randint(1, 10)) / Decimal(10) self.nodes[from_node].sendtoaddress(to_address, amount) def do_one_round(self): a0 = self.nodes[0].getnewaddress() a1 = self.nodes[1].getnewaddress() a2 = self.nodes[2].getnewaddress() self.one_send(0, a1) self.one_send(0, a2) self.one_send(1, a0) self.one_send(1, a2) self.one_send(2, a0) self.one_send(2, a1) # Have the miner (node3) mine a block. # Must sync mempools before mining. sync_mempools(self.nodes) self.nodes[3].generate(1) sync_blocks(self.nodes) # As above, this mirrors the original bash test. def start_three(self): self.nodes[0] = start_node(0, self.options.tmpdir) self.nodes[1] = start_node(1, self.options.tmpdir) self.nodes[2] = start_node(2, self.options.tmpdir) connect_nodes(self.nodes[0], 3) connect_nodes(self.nodes[1], 3) connect_nodes(self.nodes[2], 3) connect_nodes(self.nodes[2], 0) def stop_three(self): stop_node(self.nodes[0], 0) stop_node(self.nodes[1], 1) stop_node(self.nodes[2], 2) def erase_three(self): os.remove(self.options.tmpdir + "/node0/regtest/wallet.dat") os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat") os.remove(self.options.tmpdir + "/node2/regtest/wallet.dat") def run_test(self): self.log.info("Generating initial blockchain") self.nodes[0].generate(1) sync_blocks(self.nodes) self.nodes[1].generate(1) sync_blocks(self.nodes) self.nodes[2].generate(1) sync_blocks(self.nodes) self.nodes[3].generate(100) sync_blocks(self.nodes) assert_equal(self.nodes[0].getbalance(), 50) assert_equal(self.nodes[1].getbalance(), 50) assert_equal(self.nodes[2].getbalance(), 50) assert_equal(self.nodes[3].getbalance(), 0) self.log.info("Creating transactions") # Five rounds of sending each other transactions. for i in range(5): self.do_one_round() self.log.info("Backing up") tmpdir = self.options.tmpdir self.nodes[0].backupwallet(tmpdir + "/node0/wallet.bak") self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.dump") self.nodes[1].backupwallet(tmpdir + "/node1/wallet.bak") self.nodes[1].dumpwallet(tmpdir + "/node1/wallet.dump") self.nodes[2].backupwallet(tmpdir + "/node2/wallet.bak") self.nodes[2].dumpwallet(tmpdir + "/node2/wallet.dump") self.log.info("More transactions") for i in range(5): self.do_one_round() # Generate 101 more blocks, so any fees paid mature self.nodes[3].generate(101) self.sync_all() balance0 = self.nodes[0].getbalance() balance1 = self.nodes[1].getbalance() balance2 = self.nodes[2].getbalance() balance3 = self.nodes[3].getbalance() total = balance0 + balance1 + balance2 + balance3 # At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.) # 114 are mature, so the sum of all wallets should be 114 * 50 = 5700. 
assert_equal(total, 5700) # # Test restoring spender wallets from backups # self.log.info("Restoring using wallet.dat") self.stop_three() self.erase_three() # Start node2 with no chain shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks") shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate") # Restore wallets from backup shutil.copyfile( tmpdir + "/node0/wallet.bak", tmpdir + "/node0/regtest/wallet.dat") shutil.copyfile( tmpdir + "/node1/wallet.bak", tmpdir + "/node1/regtest/wallet.dat") shutil.copyfile( tmpdir + "/node2/wallet.bak", tmpdir + "/node2/regtest/wallet.dat") self.log.info("Re-starting nodes") self.start_three() sync_blocks(self.nodes) assert_equal(self.nodes[0].getbalance(), balance0) assert_equal(self.nodes[1].getbalance(), balance1) assert_equal(self.nodes[2].getbalance(), balance2) self.log.info("Restoring using dumped wallet") self.stop_three() self.erase_three() # start node2 with no chain shutil.rmtree(self.options.tmpdir + "/node2/regtest/blocks") shutil.rmtree(self.options.tmpdir + "/node2/regtest/chainstate") self.start_three() assert_equal(self.nodes[0].getbalance(), 0) assert_equal(self.nodes[1].getbalance(), 0) assert_equal(self.nodes[2].getbalance(), 0) self.nodes[0].importwallet(tmpdir + "/node0/wallet.dump") self.nodes[1].importwallet(tmpdir + "/node1/wallet.dump") self.nodes[2].importwallet(tmpdir + "/node2/wallet.dump") sync_blocks(self.nodes) assert_equal(self.nodes[0].getbalance(), balance0) assert_equal(self.nodes[1].getbalance(), balance1) assert_equal(self.nodes[2].getbalance(), balance2) if __name__ == '__main__': WalletBackupTest().main() diff --git a/qa/rpc-tests/zapwallettxes.py b/qa/rpc-tests/zapwallettxes.py index 97453f8e6..075b24c0a 100755 --- a/qa/rpc-tests/zapwallettxes.py +++ b/qa/rpc-tests/zapwallettxes.py @@ -1,81 +1,77 @@ #!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
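# Editor's note on the behaviour exercised below (a hedged summary, not
# normative documentation): -zapwallettxes removes transactions from the
# wallet at startup; transactions that are already confirmed are recovered
# from the block chain during the rescan, while unconfirmed ones are dropped
# for good, so gettransaction on an unconfirmed txid raises a
# JSONRPCException after the restart.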
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *


class ZapWalletTXesTest (BitcoinTestFramework):

    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 3

-    def setup_network(self, split=False):
-        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
-        connect_nodes_bi(self.nodes, 0, 1)
-        connect_nodes_bi(self.nodes, 1, 2)
+    def setup_network(self):
+        super().setup_network()
        connect_nodes_bi(self.nodes, 0, 2)
-        self.is_network_split = False
-        self.sync_all()

    def run_test(self):
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.sync_all()
        self.nodes[1].generate(101)
        self.sync_all()

        assert_equal(self.nodes[0].getbalance(), 50)

        txid0 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        txid1 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        txid2 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
        txid3 = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)

        tx0 = self.nodes[0].gettransaction(txid0)
        assert_equal(tx0['txid'], txid0)  # tx0 must be available (confirmed)

        tx1 = self.nodes[0].gettransaction(txid1)
        assert_equal(tx1['txid'], txid1)  # tx1 must be available (confirmed)

        tx2 = self.nodes[0].gettransaction(txid2)
        assert_equal(tx2['txid'], txid2)  # tx2 must be available (unconfirmed)

        tx3 = self.nodes[0].gettransaction(txid3)
        assert_equal(tx3['txid'], txid3)  # tx3 must be available (unconfirmed)

        # restart bitcoind
        self.nodes[0].stop()
        bitcoind_processes[0].wait()
        self.nodes[0] = start_node(0, self.options.tmpdir)

        tx3 = self.nodes[0].gettransaction(txid3)
        assert_equal(tx3['txid'], txid3)  # tx must be available (unconfirmed)

        self.nodes[0].stop()
        bitcoind_processes[0].wait()

        # restart bitcoind with zapwallettxes
        self.nodes[0] = start_node(
            0, self.options.tmpdir, ["-zapwallettxes=1"])

        # there must be an exception because the unconfirmed wallet tx
        # (txid3) must be gone by now
        assert_raises(JSONRPCException, self.nodes[0].gettransaction, txid3)

        # tx0 must still be available because it was confirmed
        tx0 = self.nodes[0].gettransaction(txid0)
        assert_equal(tx0['txid'], txid0)

if __name__ == '__main__':
    ZapWalletTXesTest().main()
diff --git a/qa/rpc-tests/zmq_test.py b/qa/rpc-tests/zmq_test.py
index 5b9a82de8..d26807bf3 100755
--- a/qa/rpc-tests/zmq_test.py
+++ b/qa/rpc-tests/zmq_test.py
@@ -1,106 +1,106 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
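# Editor's aside (a standalone sketch of the subscriber wiring used below,
# assuming pyzmq is installed and a bitcoind was started with
# -zmqpubhashblock/-zmqpubhashtx pointing at the same endpoint):
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.setsockopt(zmq.SUBSCRIBE, b"hashblock")
sock.setsockopt(zmq.SUBSCRIBE, b"hashtx")
sock.connect("tcp://127.0.0.1:28332")
# each notification arrives as [topic, 32-byte hash, 4-byte LE sequence]
topic, body, seq = sock.recv_multipart()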
#
# Test ZMQ interface
#

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import zmq
import struct


class ZMQTest (BitcoinTestFramework):

    def __init__(self):
        super().__init__()
        self.num_nodes = 4
        self.port = 28332

    def setup_nodes(self):
        self.zmqContext = zmq.Context()
        self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
        self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
        self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
-        return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
+        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[
            ['-zmqpubhashtx=tcp://127.0.0.1:' + str(self.port),
             '-zmqpubhashblock=tcp://127.0.0.1:' + str(self.port)],
            [], [], []
        ])

    def run_test(self):
        self.sync_all()

        genhashes = self.nodes[0].generate(1)
        self.sync_all()

        self.log.info("listen...")
        msg = self.zmqSubSocket.recv_multipart()
        topic = msg[0]
        assert_equal(topic, b"hashtx")
        body = msg[1]
        nseq = msg[2]
        msgSequence = struct.unpack('