diff --git a/.arclint b/.arclint index afaeb8579..0dda3e5ec 100644 --- a/.arclint +++ b/.arclint @@ -1,9 +1,13 @@ { "linters": { "clang-format": { "type": "clang-format", "include": "(^src/.*\\.(h|c|cpp)$)", "exclude": "(^src/(secp256k1|leveldb)/)" + }, + "autopep8": { + "type": "autopep8", + "include": "(\\.py$)" } } } diff --git a/arcanist/.phutil_module_cache b/arcanist/.phutil_module_cache index c44f8b63c..0fa9952ef 100644 --- a/arcanist/.phutil_module_cache +++ b/arcanist/.phutil_module_cache @@ -1 +1 @@ -{"__symbol_cache_version__":11,"21dded7bc13561b46b1b39ecbfaa6a3f":{"have":{"class":{"ClangFormatLinter":79}},"need":{"function":{"pht":302,"id":1342},"class":{"ArcanistExternalLinter":105,"ArcanistLintMessage":1349,"Filesystem":1193,"ArcanistLinter":1456,"ArcanistLintSeverity":1534}},"xmap":{"ClangFormatLinter":["ArcanistExternalLinter"]}}} \ No newline at end of file +{"__symbol_cache_version__":11,"21dded7bc13561b46b1b39ecbfaa6a3f":{"have":{"class":{"ClangFormatLinter":79}},"need":{"function":{"pht":302,"id":1342},"class":{"ArcanistExternalLinter":105,"ArcanistLintMessage":1349,"Filesystem":1193,"ArcanistLinter":1456,"ArcanistLintSeverity":1534}},"xmap":{"ClangFormatLinter":["ArcanistExternalLinter"]}},"d60c8224f471e0ecddc2a6f3c6839cd1":{"have":{"class":{"AutoPEP8FormatLinter":75}},"need":{"function":{"pht":297,"id":1317},"class":{"ArcanistExternalLinter":104,"ArcanistLintMessage":1324,"Filesystem":1168,"ArcanistLinter":1431,"ArcanistLintSeverity":1509}},"xmap":{"AutoPEP8FormatLinter":["ArcanistExternalLinter"]}}} \ No newline at end of file diff --git a/arcanist/__phutil_library_map__.php b/arcanist/__phutil_library_map__.php index 74e490d0a..059ec6d17 100644 --- a/arcanist/__phutil_library_map__.php +++ b/arcanist/__phutil_library_map__.php @@ -1,18 +1,20 @@ <?php /** * This file is automatically generated. Use 'arc liberate' to rebuild it. 
* * @generated * @phutil-library-version 2 */ phutil_register_library_map(array( '__library_version__' => 2, 'class' => array( + 'AutoPEP8FormatLinter' => 'linter/AutoPEP8Linter.php', 'ClangFormatLinter' => 'linter/ClangFormatLinter.php', ), 'function' => array(), 'xmap' => array( + 'AutoPEP8FormatLinter' => 'ArcanistExternalLinter', 'ClangFormatLinter' => 'ArcanistExternalLinter', ), )); diff --git a/arcanist/linter/AutoPEP8Linter.php b/arcanist/linter/AutoPEP8Linter.php new file mode 100644 index 000000000..2bd0032ce --- /dev/null +++ b/arcanist/linter/AutoPEP8Linter.php @@ -0,0 +1,79 @@ +<?php + +/** + * Uses the autopep8 tool to format python code + */ +final class AutoPEP8FormatLinter extends ArcanistExternalLinter { + + public function getInfoName() { + return 'autopep8'; + } + + public function getInfoURI() { + return ''; + } + + public function getInfoDescription() { + return pht('Use autopep8 for processing specified files.'); + } + + public function getLinterName() { + return 'autopep8'; + } + + public function getLinterConfigurationName() { + return 'autopep8'; + } + + public function getLinterConfigurationOptions() { + $options = array( + ); + + return $options + parent::getLinterConfigurationOptions(); + } + + public function getDefaultBinary() { + return 'autopep8'; + } + + public function getInstallInstructions() { + return pht('Make sure autopep8 is in directory specified by $PATH'); + } + + public function shouldExpectCommandErrors() { + return false; + } + + protected function getMandatoryFlags() { + return array( + ); + } + + protected function parseLinterOutput($path, $err, $stdout, $stderr) { + $ok = ($err == 0); + + if (!$ok) { + return false; + } + + $root = $this->getProjectRoot(); + $path = Filesystem::resolvePath($path, $root); + $orig = file_get_contents($path); + if ($orig == $stdout) { + return array(); + } + + $message = id(new ArcanistLintMessage()) + ->setPath($path) + ->setLine(1) + ->setChar(1) + ->setGranularity(ArcanistLinter::GRANULARITY_FILE) + ->setCode('CFMT') + ->setSeverity(ArcanistLintSeverity::SEVERITY_AUTOFIX) + ->setName('Code style violation') + ->setDescription("'$path' has code style errors.") + ->setOriginalText($orig) + ->setReplacementText($stdout); + return array($message); + } +} diff --git a/qa/rpc-tests/abc-cmdline.py b/qa/rpc-tests/abc-cmdline.py index 8c1d4ba55..f27dadcaa 100755 --- a/qa/rpc-tests/abc-cmdline.py +++ b/qa/rpc-tests/abc-cmdline.py @@ -1,138 +1,147 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Exercise the command line functions specific to ABC functionality. 
Currently: -excessiveblocksize=<blocksize_in_bytes> """ import re from test_framework.test_framework import BitcoinTestFramework from test_framework.util import (start_node, stop_node, assert_equal) from test_framework.cdefs import LEGACY_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE from test_framework.outputchecker import OutputChecker MAX_GENERATED_BLOCK_SIZE_ERROR = ( 'Max generated block size (blockmaxsize) cannot be lower than ' '1MB or exceed the excessive block size (excessiveblocksize)') + class ABC_CmdLine_Test (BitcoinTestFramework): def __init__(self): super(ABC_CmdLine_Test, self).__init__() self.num_nodes = 1 self.setup_clean_chain = False def setup_network(self): self.nodes = self.setup_nodes() def check_excessive(self, expected_value): 'Check that the excessiveBlockSize is as expected' getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, expected_value) def check_subversion(self, pattern_str): 'Check that the subversion is set as expected' netinfo = self.nodes[0].getnetworkinfo() subversion = netinfo['subversion'] pattern = re.compile(pattern_str) assert(pattern.match(subversion)) def excessiveblocksize_test(self): print("Testing -excessiveblocksize") print(" Set to twice the default, i.e. %d bytes" % (2 * LEGACY_MAX_BLOCK_SIZE)) stop_node(self.nodes[0], 0) self.extra_args = [["-excessiveblocksize=%d" % (2 * LEGACY_MAX_BLOCK_SIZE)]] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0]) self.check_excessive(2 * LEGACY_MAX_BLOCK_SIZE) # Check for EB correctness in the subver string self.check_subversion("/Bitcoin ABC:.*\(EB2\.0\)/") print(" Attempt to set below legacy limit of 1MB - try %d bytes" % LEGACY_MAX_BLOCK_SIZE) outputchecker = OutputChecker() stop_node(self.nodes[0], 0) try: - self.extra_args = [["-excessiveblocksize=%d" % LEGACY_MAX_BLOCK_SIZE]] + self.extra_args = [ + ["-excessiveblocksize=%d" % LEGACY_MAX_BLOCK_SIZE]] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0], stderr_checker=outputchecker) except Exception as e: assert(outputchecker.contains( 'Error: Excessive block size must be > 1,000,000 bytes (1MB)')) - assert_equal('bitcoind exited with status 1 during initialization', str(e)) + assert_equal( + 'bitcoind exited with status 1 during initialization', str(e)) else: raise AssertionError("Must not accept excessiveblocksize" " value < %d bytes" % LEGACY_MAX_BLOCK_SIZE) print(" Attempt to set below blockmaxsize (mining limit)") outputchecker = OutputChecker() try: self.extra_args = [['-blockmaxsize=1500000', '-excessiveblocksize=1300000']] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0], stderr_checker=outputchecker) except Exception as e: - assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) - assert_equal('bitcoind exited with status 1 during initialization', str(e)) + assert(outputchecker.contains( + 'Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) + assert_equal( + 'bitcoind exited with status 1 during initialization', str(e)) else: raise AssertionError('Must not accept excessiveblocksize' ' below blockmaxsize') # Make sure that allowsmallgeneratedblocksize doesn't help here outputchecker = OutputChecker() try: self.extra_args = [['-blockmaxsize=1500000', '-excessiveblocksize=1300000', '-allowsmallgeneratedblocksize']] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0], stderr_checker=outputchecker) except Exception as e: - assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) - 
assert_equal('bitcoind exited with status 1 during initialization', str(e)) + assert(outputchecker.contains( + 'Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) + assert_equal( + 'bitcoind exited with status 1 during initialization', str(e)) else: raise AssertionError('Must not accept excessiveblocksize' ' below blockmaxsize') print(" Attempt to set blockmaxsize below 1MB") outputchecker = OutputChecker() try: self.extra_args = [["-blockmaxsize=%d" % LEGACY_MAX_BLOCK_SIZE]] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0], stderr_checker=outputchecker) except Exception as e: - assert(outputchecker.contains('Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) - assert_equal('bitcoind exited with status 1 during initialization', str(e)) + assert(outputchecker.contains( + 'Error: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) + assert_equal( + 'bitcoind exited with status 1 during initialization', str(e)) else: raise AssertionError('Must not accept excessiveblocksize' ' below blockmaxsize') outputchecker = OutputChecker() self.extra_args = [["-blockmaxsize=%d" % LEGACY_MAX_BLOCK_SIZE, "-allowsmallgeneratedblocksize"]] self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0], stderr_checker=outputchecker) - assert(outputchecker.contains('Warning: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) - + assert(outputchecker.contains( + 'Warning: ' + MAX_GENERATED_BLOCK_SIZE_ERROR)) def run_test(self): # Run tests on -excessiveblocksize option self.excessiveblocksize_test() if __name__ == '__main__': ABC_CmdLine_Test().main() diff --git a/qa/rpc-tests/abc-p2p-activation.py b/qa/rpc-tests/abc-p2p-activation.py index 7e3b7a161..339b3ac19 100755 --- a/qa/rpc-tests/abc-p2p-activation.py +++ b/qa/rpc-tests/abc-p2p-activation.py @@ -1,369 +1,390 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This test checks activation of UAHF and the different consensus related to this activation. It is derived from the much more complex p2p-fullblocktest. """ from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.comptool import TestManager, TestInstance, RejectResult from test_framework.blocktools import * import time from test_framework.key import CECKey from test_framework.script import * from test_framework.cdefs import * # Error for illegal use of SIGHASH_FORKID SIGHASH_FORKID_ERROR = b'non-mandatory-script-verify-flag (Illegal use of SIGHASH_FORKID)' RPC_SIGHASH_FORKID_ERROR = "64: " + SIGHASH_FORKID_ERROR.decode("utf-8") SIGHASH_INVALID_ERROR = b'mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack e' # far into the future UAHF_START_TIME = 2000000000 + class PreviousSpendableOutput(object): - def __init__(self, tx = CTransaction(), n = -1): + + def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending class FullBlockTest(ComparisonTestFramework): # Can either run this test as 1 node with expected answers, or two and compare them. - # Change the "outcome" variable from each TestInstance object to only do the comparison. + # Change the "outcome" variable from each TestInstance object to only do + # the comparison. 
+ def __init__(self): super().__init__() self.num_nodes = 1 self.block_heights = {} self.coinbase_key = CECKey() self.coinbase_key.set_secretbytes(b"fatstacks") self.coinbase_pubkey = self.coinbase_key.get_pubkey() self.forkid_key = CECKey() self.forkid_key.set_secretbytes(b"forkid") self.forkid_pubkey = self.forkid_key.get_pubkey() self.tip = None self.uahfEnabled = False self.blocks = {} def setup_network(self): self.extra_args = [['-debug', '-norelaypriority', "-uahfstarttime=%d" % UAHF_START_TIME, '-whitelist=127.0.0.1', - '-par=1' ]] + '-par=1']] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, binary=[self.options.testbinary]) def add_options(self, parser): super().add_options(parser) - parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True) + parser.add_option( + "--runbarelyexpensive", dest="runbarelyexpensive", default=True) def run_test(self): self.test = TestManager(self, self.options.tmpdir) self.test.add_all_connections(self.nodes) # Start up network handling in another thread NetworkThread().start() # Mock the time so that block activating the HF will be accepted self.nodes[0].setmocktime(UAHF_START_TIME) self.test.run() def add_transactions_to_block(self, block, tx_list): - [ tx.rehash() for tx in tx_list ] + [tx.rehash() for tx in tx_list] block.vtx.extend(tx_list) # this is a little handier to use than the version in blocktools.py def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): tx = create_transaction(spend_tx, n, b"", value, script) return tx # sign a transaction, using the key we know about - # this signs input 0 in tx, which is assumed to be spending output n in spend_tx + # this signs input 0 in tx, which is assumed to be spending output n in + # spend_tx def sign_tx(self, tx, spend_tx, n): scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend tx.vin[0].scriptSig = CScript() return - (sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL) - tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))]) + (sighash, err) = SignatureHash( + spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL) + tx.vin[0].scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))]) def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): tx = self.create_tx(spend_tx, n, value, script) self.sign_tx(tx, spend_tx, n) tx.rehash() return tx def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True): """ Create a block on top of self.tip, and advance self.tip to point to the new block if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output, and rest will go to fees. 
""" if self.tip == None: base_block_hash = self.genesis_hash - block_time = int(time.time())+1 + block_time = int(time.time()) + 1 else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) coinbase.vout[0].nValue += additional_coinbase_value if (spend != None): - coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees + coinbase.vout[0].nValue += spend.tx.vout[ + spend.n].nValue - 1 # all but one satoshi to fees coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) spendable_output = None if (spend != None): tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # no signature yet + tx.vin.append( + CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # no signature yet # This copies the java comparison tool testing behavior: the first # txout has a garbage scriptPubKey, "to make sure we're not # pre-verifying too much" (?) - tx.vout.append(CTxOut(0, CScript([random.randint(0,255), height & 255]))) + tx.vout.append( + CTxOut(0, CScript([random.randint(0, 255), height & 255]))) if script == None: tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) else: tx.vout.append(CTxOut(1, script)) spendable_output = PreviousSpendableOutput(tx, 0) # Now sign it if necessary scriptSig = b"" scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend scriptSig = CScript([OP_TRUE]) else: # We have to actually sign it nHashType = SIGHASH_ALL sighash = None if self.uahfEnabled == False: - (sighash, err) = SignatureHash(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL) + (sighash, err) = SignatureHash( + spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL) else: - nHashType |= SIGHASH_FORKID - sighash = SignatureHashForkId(spend.tx.vout[spend.n].scriptPubKey, tx, 0, nHashType, spend.tx.vout[spend.n].nValue) - scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([nHashType]))]) + nHashType |= SIGHASH_FORKID + sighash = SignatureHashForkId( + spend.tx.vout[spend.n].scriptPubKey, tx, 0, nHashType, spend.tx.vout[spend.n].nValue) + scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([nHashType]))]) tx.vin[0].scriptSig = scriptSig # Now add the transaction to the block self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() if spendable_output != None and block_size > 0: while len(block.serialize()) < block_size: tx = CTransaction() script_length = block_size - len(block.serialize()) - 79 if script_length > 510000: script_length = 500000 - tx_sigops = min(extra_sigops, script_length, MAX_TX_SIGOPS_COUNT) + tx_sigops = min( + extra_sigops, script_length, MAX_TX_SIGOPS_COUNT) extra_sigops -= tx_sigops script_pad_len = script_length - tx_sigops - script_output = CScript([b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops) + script_output = CScript( + [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx.vout.append(CTxOut(0, script_output)) - tx.vin.append(CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n))) + tx.vin.append( + CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n))) spendable_output = PreviousSpendableOutput(tx, 0) self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() # Make sure the math 
above worked out to produce the correct block size # (the math will fail if there are too many transactions in the block) assert_equal(len(block.serialize()), block_size) # Make sure all the requested sigops have been included assert_equal(extra_sigops, 0) if solve: block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block def get_tests(self): self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 spendable_outputs = [] # save the current tip so it can be spent by a later block def save_spendable_output(): spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(): return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) # returns a test case that asserts that the current tip was accepted def accepted(): return TestInstance([[self.tip, True]]) # returns a test case that asserts that the current tip was rejected - def rejected(reject = None): + def rejected(reject=None): if reject is None: return TestInstance([[self.tip, False]]) else: return TestInstance([[self.tip, reject]]) # move the tip back to a previous block def tip(number): self.tip = self.blocks[number] # adds transactions to the block and updates state def update_block(block_number, new_transactions): block = self.blocks[block_number] self.add_transactions_to_block(block, new_transactions) old_sha256 = block.sha256 block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: - self.block_heights[block.sha256] = self.block_heights[old_sha256] + self.block_heights[ + block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block # shorthand for functions block = self.next_block node = self.nodes[0] # Create a new block block(0, block_size=LEGACY_MAX_BLOCK_SIZE) save_spendable_output() yield accepted() # Now we need that block to mature so we can spend the coinbase. test = TestInstance(sync_every_block=False) for i in range(99): block(5000 + i) test.blocks_and_transactions.append([self.tip, True]) save_spendable_output() yield test # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(100): out.append(get_spendable_output()) # blocks up to LEGACY_MAX_BLOCK_SIZE are accepted. block(1, spend=out[0], block_size=LEGACY_MAX_BLOCK_SIZE) yield accepted() # bigger blocks are rejected as the fork isn't activated yet. block(2, spend=out[1], block_size=LEGACY_MAX_BLOCK_SIZE + 1) yield rejected(RejectResult(16, b'bad-blk-length')) # Rewind bad block tip(1) # Create a transaction that we will use to test SIGHASH_FORKID script_forkid = CScript([self.forkid_pubkey, OP_CHECKSIG]) - tx_forkid = self.create_and_sign_transaction(out[1].tx, out[1].n, 1, script_forkid) + tx_forkid = self.create_and_sign_transaction( + out[1].tx, out[1].n, 1, script_forkid) # Create a block that would activate the HF. We also add the # transaction that will allow us to test SIGHASH_FORKID b03 = block(3) b03.nTime = UAHF_START_TIME update_block(3, [tx_forkid]) yield accepted() # Pile up 4 blocks on top to get to the point just before activation. block(4, spend=out[2]) yield accepted() block(5, spend=out[3]) yield accepted() block(6, spend=out[4]) yield accepted() block(7, spend=out[5]) yield accepted() # bigger blocks are still rejected as the fork isn't activated yet. 
block(8, spend=out[6], block_size=LEGACY_MAX_BLOCK_SIZE + 1) yield rejected(RejectResult(16, b'bad-blk-length')) # Rewind bad block tip(7) # build a transaction using SIGHASH_FORKID tx_spend = self.create_tx(tx_forkid, 0, 1, CScript([OP_TRUE])) - sighash_spend = SignatureHashForkId(script_forkid, tx_spend, 0, SIGHASH_FORKID | SIGHASH_ALL, 1) + sighash_spend = SignatureHashForkId( + script_forkid, tx_spend, 0, SIGHASH_FORKID | SIGHASH_ALL, 1) sig_forkid = self.forkid_key.sign(sighash_spend) - tx_spend.vin[0].scriptSig = CScript([sig_forkid + bytes(bytearray([SIGHASH_FORKID | SIGHASH_ALL]))]) + tx_spend.vin[0].scriptSig = CScript( + [sig_forkid + bytes(bytearray([SIGHASH_FORKID | SIGHASH_ALL]))]) tx_spend.rehash() # This transaction can't get into the mempool yet try: node.sendrawtransaction(ToHex(tx_spend)) except JSONRPCException as exp: assert_equal(exp.error["message"], RPC_SIGHASH_FORKID_ERROR) else: assert(False) # The transaction is rejected, so the mempool should still be empty assert_equal(set(node.getrawmempool()), set()) # check that SIGHASH_FORKID transaction are still rejected block(9) update_block(9, [tx_spend]) yield rejected(RejectResult(16, SIGHASH_INVALID_ERROR)) # Rewind bad block tip(7) # Pile up another block, to activate. OP_RETURN anti replay # outputs are still considered valid. - antireplay_script=CScript([OP_RETURN, ANTI_REPLAY_COMMITMENT]) + antireplay_script = CScript([OP_RETURN, ANTI_REPLAY_COMMITMENT]) block(10, spend=out[6], script=antireplay_script) yield accepted() # Now that the HF is activated, replay protected tx are # accepted in the mempool tx_spend_id = node.sendrawtransaction(ToHex(tx_spend)) assert_equal(set(node.getrawmempool()), {tx_spend_id}) # Mark the HF self.uahfEnabled = True # HF is active now, we MUST create a big block. - block(11, spend=out[7], block_size=LEGACY_MAX_BLOCK_SIZE); + block(11, spend=out[7], block_size=LEGACY_MAX_BLOCK_SIZE) yield rejected(RejectResult(16, b'bad-blk-too-small')) # Rewind bad block tip(10) # HF is active, now we can create bigger blocks and use # SIGHASH_FORKID replay protection. block(12, spend=out[7], block_size=LEGACY_MAX_BLOCK_SIZE + 1) update_block(12, [tx_spend]) yield accepted() # We save this block id to test reorg fork_block_id = node.getbestblockhash() # The transaction has been mined, it's not in the mempool anymore assert_equal(set(node.getrawmempool()), set()) # Test OP_RETURN replay protection block(13, spend=out[8], script=antireplay_script) yield rejected(RejectResult(16, b'bad-txn-replay')) # Rewind bad block tip(12) # Check that only the first block has to be > 1MB block(14, spend=out[8]) yield accepted() # Now we reorg just when the HF activated. The # SIGHASH_FORKID transaction is back in the mempool node.invalidateblock(fork_block_id) assert(tx_spend_id in set(node.getrawmempool())) # And now just before when the HF activated. 
The # SIGHASH_FORKID should be kicked out the mempool node.invalidateblock(node.getbestblockhash()) assert(tx_spend_id not in set(node.getrawmempool())) if __name__ == '__main__': FullBlockTest().main() diff --git a/qa/rpc-tests/abc-p2p-fullblocktest.py b/qa/rpc-tests/abc-p2p-fullblocktest.py index 2ab86df7d..82eff3933 100755 --- a/qa/rpc-tests/abc-p2p-fullblocktest.py +++ b/qa/rpc-tests/abc-p2p-fullblocktest.py @@ -1,500 +1,540 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2016 The Bitcoin Core developers # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ This test checks simple acceptance of bigger blocks via p2p. It is derived from the much more complex p2p-fullblocktest. The intention is that small tests can be derived from this one, or this one can be extended, to cover the checks done for bigger blocks (e.g. sigops limits). """ from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.comptool import TestManager, TestInstance, RejectResult from test_framework.blocktools import * import time from test_framework.key import CECKey from test_framework.script import * from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE, MAX_BLOCK_SIGOPS_PER_MB, MAX_TX_SIGOPS_COUNT) # far into the future UAHF_START_TIME = 2000000000 + class PreviousSpendableOutput(object): - def __init__(self, tx = CTransaction(), n = -1): + + def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending # TestNode: A peer we use to send messages to bitcoind, and store responses. class TestNode(SingleNodeConnCB): + def __init__(self): self.last_sendcmpct = None self.last_cmpctblock = None self.last_getheaders = None self.last_headers = None SingleNodeConnCB.__init__(self) def on_sendcmpct(self, conn, message): self.last_sendcmpct = message def on_cmpctblock(self, conn, message): self.last_cmpctblock = message self.last_cmpctblock.header_and_shortids.header.calc_sha256() def on_getheaders(self, conn, message): self.last_getheaders = message def on_headers(self, conn, message): self.last_headers = message for x in self.last_headers.headers: x.calc_sha256() def clear_block_data(self): with mininode_lock: self.last_sendcmpct = None self.last_cmpctblock = None class FullBlockTest(ComparisonTestFramework): # Can either run this test as 1 node with expected answers, or two and compare them. - # Change the "outcome" variable from each TestInstance object to only do the comparison. + # Change the "outcome" variable from each TestInstance object to only do + # the comparison. 
+ def __init__(self): super().__init__() self.excessive_block_size = 16 * ONE_MEGABYTE self.num_nodes = 1 self.block_heights = {} self.coinbase_key = CECKey() self.coinbase_key.set_secretbytes(b"fatstacks") self.coinbase_pubkey = self.coinbase_key.get_pubkey() self.tip = None self.blocks = {} def setup_network(self): self.extra_args = [['-debug', '-norelaypriority', '-whitelist=127.0.0.1', '-limitancestorcount=9999', '-limitancestorsize=9999', '-limitdescendantcount=9999', '-limitdescendantsize=9999', '-maxmempool=999', "-uahfstarttime=%d" % UAHF_START_TIME, "-excessiveblocksize=%d" - % self.excessive_block_size ]] + % self.excessive_block_size]] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, binary=[self.options.testbinary]) def add_options(self, parser): super().add_options(parser) - parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True) + parser.add_option( + "--runbarelyexpensive", dest="runbarelyexpensive", default=True) def run_test(self): self.test = TestManager(self, self.options.tmpdir) self.test.add_all_connections(self.nodes) # Start up network handling in another thread NetworkThread().start() # Set the blocksize to 2MB as initial condition self.nodes[0].setexcessiveblock(self.excessive_block_size) self.nodes[0].setmocktime(UAHF_START_TIME) self.test.run() def add_transactions_to_block(self, block, tx_list): - [ tx.rehash() for tx in tx_list ] + [tx.rehash() for tx in tx_list] block.vtx.extend(tx_list) # this is a little handier to use than the version in blocktools.py def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])): tx = create_transaction(spend_tx, n, b"", value, script) return tx # sign a transaction, using the key we know about - # this signs input 0 in tx, which is assumed to be spending output n in spend_tx + # this signs input 0 in tx, which is assumed to be spending output n in + # spend_tx def sign_tx(self, tx, spend_tx, n): scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend tx.vin[0].scriptSig = CScript() return - sighash = SignatureHashForkId(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL|SIGHASH_FORKID, spend_tx.vout[n].nValue) - tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL|SIGHASH_FORKID]))]) + sighash = SignatureHashForkId( + spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend_tx.vout[n].nValue) + tx.vin[0].scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])): tx = self.create_tx(spend_tx, n, value, script) self.sign_tx(tx, spend_tx, n) tx.rehash() return tx def next_block(self, number, spend=None, additional_coinbase_value=0, script=None, extra_sigops=0, block_size=0, solve=True): """ Create a block on top of self.tip, and advance self.tip to point to the new block if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output, and rest will go to fees. 
""" if self.tip == None: base_block_hash = self.genesis_hash - block_time = int(time.time())+1 + block_time = int(time.time()) + 1 else: base_block_hash = self.tip.sha256 block_time = self.tip.nTime + 1 # First create the coinbase height = self.block_heights[base_block_hash] + 1 coinbase = create_coinbase(height, self.coinbase_pubkey) coinbase.vout[0].nValue += additional_coinbase_value if (spend != None): - coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees + coinbase.vout[0].nValue += spend.tx.vout[ + spend.n].nValue - 1 # all but one satoshi to fees coinbase.rehash() block = create_block(base_block_hash, coinbase, block_time) spendable_output = None if (spend != None): tx = CTransaction() - tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # no signature yet - # We put some random data into the first transaction of the chain to randomize ids - tx.vout.append(CTxOut(0, CScript([random.randint(0,255), OP_DROP, OP_TRUE]))) + tx.vin.append( + CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # no signature yet + # We put some random data into the first transaction of the chain + # to randomize ids + tx.vout.append( + CTxOut(0, CScript([random.randint(0, 255), OP_DROP, OP_TRUE]))) if script == None: tx.vout.append(CTxOut(1, CScript([OP_TRUE]))) else: tx.vout.append(CTxOut(1, script)) spendable_output = PreviousSpendableOutput(tx, 0) # Now sign it if necessary scriptSig = b"" scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend scriptSig = CScript([OP_TRUE]) else: # We have to actually sign it - sighash = SignatureHashForkId(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL|SIGHASH_FORKID, spend.tx.vout[spend.n].nValue) - scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL|SIGHASH_FORKID]))]) + sighash = SignatureHashForkId( + spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL | SIGHASH_FORKID, spend.tx.vout[spend.n].nValue) + scriptSig = CScript( + [self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID]))]) tx.vin[0].scriptSig = scriptSig # Now add the transaction to the block self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() if spendable_output != None and block_size > 0: while len(block.serialize()) < block_size: tx = CTransaction() script_length = block_size - len(block.serialize()) - 79 if script_length > 510000: script_length = 500000 - tx_sigops = min(extra_sigops, script_length, MAX_TX_SIGOPS_COUNT) + tx_sigops = min( + extra_sigops, script_length, MAX_TX_SIGOPS_COUNT) extra_sigops -= tx_sigops script_pad_len = script_length - tx_sigops - script_output = CScript([b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops) + script_output = CScript( + [b'\x00' * script_pad_len] + [OP_CHECKSIG] * tx_sigops) tx.vout.append(CTxOut(0, CScript([OP_TRUE]))) tx.vout.append(CTxOut(0, script_output)) - tx.vin.append(CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n))) + tx.vin.append( + CTxIn(COutPoint(spendable_output.tx.sha256, spendable_output.n))) spendable_output = PreviousSpendableOutput(tx, 0) self.add_transactions_to_block(block, [tx]) block.hashMerkleRoot = block.calc_merkle_root() # Make sure the math above worked out to produce the correct block size # (the math will fail if there are too many transactions in the block) assert_equal(len(block.serialize()), block_size) # Make sure all the requested sigops have been 
included assert_equal(extra_sigops, 0) if solve: block.solve() self.tip = block self.block_heights[block.sha256] = height assert number not in self.blocks self.blocks[number] = block return block def get_tests(self): self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) self.block_heights[self.genesis_hash] = 0 spendable_outputs = [] # save the current tip so it can be spent by a later block def save_spendable_output(): spendable_outputs.append(self.tip) # get an output that we previously marked as spendable def get_spendable_output(): return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0) # returns a test case that asserts that the current tip was accepted def accepted(): return TestInstance([[self.tip, True]]) # returns a test case that asserts that the current tip was rejected - def rejected(reject = None): + def rejected(reject=None): if reject is None: return TestInstance([[self.tip, False]]) else: return TestInstance([[self.tip, reject]]) # move the tip back to a previous block def tip(number): self.tip = self.blocks[number] # adds transactions to the block and updates state def update_block(block_number, new_transactions): block = self.blocks[block_number] self.add_transactions_to_block(block, new_transactions) old_sha256 = block.sha256 block.hashMerkleRoot = block.calc_merkle_root() block.solve() # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: - self.block_heights[block.sha256] = self.block_heights[old_sha256] + self.block_heights[ + block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block # shorthand for functions block = self.next_block # Create a new block block(0) save_spendable_output() yield accepted() # Now we need that block to mature so we can spend the coinbase. test = TestInstance(sync_every_block=False) for i in range(99): block(5000 + i) test.blocks_and_transactions.append([self.tip, True]) save_spendable_output() yield test # In order to trigger the HF, we need one block past activation time bfork = block(5555) bfork.nTime = UAHF_START_TIME update_block(5555, []) save_spendable_output() yield accepted() # Then we pile 5 blocks to move MTP forward and trigger the HF for i in range(5): block(5100 + i) test.blocks_and_transactions.append([self.tip, True]) save_spendable_output() yield test # Create a new block and activate the fork, the block needs # to be > 1MB . For more specific tests about the fork activation, # check abc-p2p-activation.py - block(5556, spend=get_spendable_output(), block_size=LEGACY_MAX_BLOCK_SIZE + 1) + block(5556, spend=get_spendable_output(), + block_size=LEGACY_MAX_BLOCK_SIZE + 1) yield accepted() # collect spendable outputs now to avoid cluttering the code later on out = [] for i in range(100): out.append(get_spendable_output()) # Let's build some blocks and test them. for i in range(16): n = i + 1 - block(n, spend=out[i], block_size=n*ONE_MEGABYTE) + block(n, spend=out[i], block_size=n * ONE_MEGABYTE) yield accepted() # block of maximal size block(17, spend=out[16], block_size=self.excessive_block_size) yield accepted() # Reject oversized blocks with bad-blk-length error block(18, spend=out[17], block_size=self.excessive_block_size + 1) yield rejected(RejectResult(16, b'bad-blk-length')) - + # Rewind bad block. 
tip(17) # Accept many sigops - lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) - block(19, spend=out[17], script=lots_of_checksigs, block_size=ONE_MEGABYTE) + lots_of_checksigs = CScript( + [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB - 1)) + block( + 19, spend=out[17], script=lots_of_checksigs, block_size=ONE_MEGABYTE) yield accepted() - - too_many_blk_checksigs = CScript([OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB) - block(20, spend=out[18], script=too_many_blk_checksigs, block_size=ONE_MEGABYTE) + + too_many_blk_checksigs = CScript( + [OP_CHECKSIG] * MAX_BLOCK_SIGOPS_PER_MB) + block( + 20, spend=out[18], script=too_many_blk_checksigs, block_size=ONE_MEGABYTE) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(19) # Accept 40k sigops per block > 1MB and <= 2MB - block(21, spend=out[18], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=ONE_MEGABYTE + 1) + block(21, spend=out[18], script=lots_of_checksigs, + extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=ONE_MEGABYTE + 1) yield accepted() # Accept 40k sigops per block > 1MB and <= 2MB - block(22, spend=out[19], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=2*ONE_MEGABYTE) + block(22, spend=out[19], script=lots_of_checksigs, + extra_sigops=MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE) yield accepted() # Reject more than 40k sigops per block > 1MB and <= 2MB. - block(23, spend=out[20], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=ONE_MEGABYTE + 1) + block(23, spend=out[20], script=lots_of_checksigs, + extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=ONE_MEGABYTE + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(22) # Reject more than 40k sigops per block > 1MB and <= 2MB. - block(24, spend=out[20], script=lots_of_checksigs, extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2*ONE_MEGABYTE) + block(24, spend=out[20], script=lots_of_checksigs, + extra_sigops=MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(22) # Accept 60k sigops per block > 2MB and <= 3MB - block(25, spend=out[20], script=lots_of_checksigs, extra_sigops=2*MAX_BLOCK_SIGOPS_PER_MB, block_size=2*ONE_MEGABYTE + 1) + block(25, spend=out[20], script=lots_of_checksigs, extra_sigops=2 * + MAX_BLOCK_SIGOPS_PER_MB, block_size=2 * ONE_MEGABYTE + 1) yield accepted() # Accept 60k sigops per block > 2MB and <= 3MB - block(26, spend=out[21], script=lots_of_checksigs, extra_sigops=2*MAX_BLOCK_SIGOPS_PER_MB, block_size=3*ONE_MEGABYTE) + block(26, spend=out[21], script=lots_of_checksigs, + extra_sigops=2 * MAX_BLOCK_SIGOPS_PER_MB, block_size=3 * ONE_MEGABYTE) yield accepted() # Reject more than 40k sigops per block > 1MB and <= 2MB. - block(27, spend=out[22], script=lots_of_checksigs, extra_sigops=2*MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2*ONE_MEGABYTE + 1) + block(27, spend=out[22], script=lots_of_checksigs, extra_sigops=2 * + MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=2 * ONE_MEGABYTE + 1) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(26) # Reject more than 40k sigops per block > 1MB and <= 2MB. 
- block(28, spend=out[22], script=lots_of_checksigs, extra_sigops=2*MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=3*ONE_MEGABYTE) + block(28, spend=out[22], script=lots_of_checksigs, extra_sigops=2 * + MAX_BLOCK_SIGOPS_PER_MB + 1, block_size=3 * ONE_MEGABYTE) yield rejected(RejectResult(16, b'bad-blk-sigops')) # Rewind bad block tip(26) # Too many sigops in one txn - too_many_tx_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1)) - block(29, spend=out[22], script=too_many_tx_checksigs, block_size=ONE_MEGABYTE + 1) + too_many_tx_checksigs = CScript( + [OP_CHECKSIG] * (MAX_BLOCK_SIGOPS_PER_MB + 1)) + block( + 29, spend=out[22], script=too_many_tx_checksigs, block_size=ONE_MEGABYTE + 1) yield rejected(RejectResult(16, b'bad-txn-sigops')) # Rewind bad block tip(26) # P2SH # Build the redeem script, hash it, use hash to create the p2sh script - redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG]) + redeem_script = CScript([self.coinbase_pubkey] + [ + OP_2DUP, OP_CHECKSIGVERIFY] * 5 + [OP_CHECKSIG]) redeem_script_hash = hash160(redeem_script) p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL]) # Create a p2sh transaction - p2sh_tx = self.create_and_sign_transaction(out[22].tx, out[22].n, 1, p2sh_script) + p2sh_tx = self.create_and_sign_transaction( + out[22].tx, out[22].n, 1, p2sh_script) # Add the transaction to the block block(30) update_block(30, [p2sh_tx]) yield accepted() - # Creates a new transaction using the p2sh transaction included in the last block + # Creates a new transaction using the p2sh transaction included in the + # last block def spend_p2sh_tx(output_script=CScript([OP_TRUE])): # Create the transaction spent_p2sh_tx = CTransaction() spent_p2sh_tx.vin.append(CTxIn(COutPoint(p2sh_tx.sha256, 0), b'')) spent_p2sh_tx.vout.append(CTxOut(1, output_script)) # Sign the transaction using the redeem script - sighash = SignatureHashForkId(redeem_script, spent_p2sh_tx, 0, SIGHASH_ALL|SIGHASH_FORKID, p2sh_tx.vout[0].nValue) - sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL|SIGHASH_FORKID])) + sighash = SignatureHashForkId( + redeem_script, spent_p2sh_tx, 0, SIGHASH_ALL | SIGHASH_FORKID, p2sh_tx.vout[0].nValue) + sig = self.coinbase_key.sign(sighash) + bytes( + bytearray([SIGHASH_ALL | SIGHASH_FORKID])) spent_p2sh_tx.vin[0].scriptSig = CScript([sig, redeem_script]) spent_p2sh_tx.rehash() return spent_p2sh_tx # Sigops p2sh limit - p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - redeem_script.GetSigOpCount(True) + p2sh_sigops_limit = MAX_BLOCK_SIGOPS_PER_MB - \ + redeem_script.GetSigOpCount(True) # Too many sigops in one p2sh txn too_many_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit + 1)) block(31, spend=out[23], block_size=ONE_MEGABYTE + 1) update_block(31, [spend_p2sh_tx(too_many_p2sh_sigops)]) yield rejected(RejectResult(16, b'bad-txn-sigops')) # Rewind bad block tip(30) # Max sigops in one p2sh txn max_p2sh_sigops = CScript([OP_CHECKSIG] * (p2sh_sigops_limit)) block(32, spend=out[23], block_size=ONE_MEGABYTE + 1) update_block(32, [spend_p2sh_tx(max_p2sh_sigops)]) yield accepted() # Check that compact blocks also work for big blocks node = self.nodes[0] peer = TestNode() - peer.add_connection(NodeConn('127.0.0.1', p2p_port(0), node, peer)); + peer.add_connection(NodeConn('127.0.0.1', p2p_port(0), node, peer)) # Start up network handling in another thread and wait for connection # to be established NetworkThread().start() peer.wait_for_verack() # Wait for SENDCMPCT def received_sendcmpct(): 
return (peer.last_sendcmpct != None) got_sendcmpt = wait_until(received_sendcmpct, timeout=30) assert(got_sendcmpt) sendcmpct = msg_sendcmpct() sendcmpct.version = 1 sendcmpct.announce = True peer.send_and_ping(sendcmpct) # Exchange headers def received_getheaders(): return (peer.last_getheaders != None) got_getheaders = wait_until(received_getheaders, timeout=30) assert(got_getheaders) # Return the favor peer.send_message(peer.last_getheaders) # Wait for the header list def received_headers(): return (peer.last_headers != None) got_headers = wait_until(received_headers, timeout=30) assert(got_headers) # Now both sides know about the same headers peer.send_message(peer.last_headers) # Send a block b33 = block(33, spend=out[24], block_size=ONE_MEGABYTE + 1) yield accepted() # Check that the node forwards it via compact block def received_block(): return (peer.last_cmpctblock != None) got_cmpctblock = wait_until(received_block, timeout=30) assert(got_cmpctblock) # Was it our block? cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header cmpctblk_header.calc_sha256() assert(cmpctblk_header.sha256 == b33.sha256) # Send a bigger block peer.clear_block_data() - b34 = block(34, spend=out[25], block_size=8*ONE_MEGABYTE) + b34 = block(34, spend=out[25], block_size=8 * ONE_MEGABYTE) yield accepted() # Check that the node forwards it via compact block got_cmpctblock = wait_until(received_block, timeout=30) assert(got_cmpctblock) # Was it our block? cmpctblk_header = peer.last_cmpctblock.header_and_shortids.header cmpctblk_header.calc_sha256() assert(cmpctblk_header.sha256 == b34.sha256) # Let's send a compact block and see if the node accepts it. # First, we generate the block and send all transactions to the mempool - b35 = block(35, spend=out[26], block_size=8*ONE_MEGABYTE) + b35 = block(35, spend=out[26], block_size=8 * ONE_MEGABYTE) for i in range(1, len(b35.vtx)): node.sendrawtransaction(ToHex(b35.vtx[i]), True) # Now we create the compact block and send it comp_block = HeaderAndShortIDs() comp_block.initialize_from_block(b35) peer.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) # Check that the compact block was received properly assert(int(node.getbestblockhash(), 16) == b35.sha256) if __name__ == '__main__': FullBlockTest().main() diff --git a/qa/rpc-tests/abc-rpc.py b/qa/rpc-tests/abc-rpc.py index 7111a502c..28e0d434a 100755 --- a/qa/rpc-tests/abc-rpc.py +++ b/qa/rpc-tests/abc-rpc.py @@ -1,174 +1,179 @@ #!/usr/bin/env python3 # Copyright (c) 2017 The Bitcoin developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # Exercise the Bitcoin ABC RPC calls. 
import time import random import re from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import NODE_BITCOIN_CASH from test_framework.cdefs import (ONE_MEGABYTE, LEGACY_MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE) # far into the future UAHF_START_TIME = 2000000000 + class ABC_RPC_Test (BitcoinTestFramework): def __init__(self): super(ABC_RPC_Test, self).__init__() self.num_nodes = 1 self.tip = None self.setup_clean_chain = True def setup_network(self): self.extra_args = [['-debug', '-norelaypriority', "-mocktime=%d" % UAHF_START_TIME, "-uahfstarttime=%d" % UAHF_START_TIME, '-whitelist=127.0.0.1', '-par=1']] self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args) self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16) def check_subversion(self, pattern_str): 'Check that the subversion is set as expected' netinfo = self.nodes[0].getnetworkinfo() subversion = netinfo['subversion'] pattern = re.compile(pattern_str) assert(pattern.match(subversion)) def test_excessiveblock(self): # Check that we start with DEFAULT_MAX_BLOCK_SIZE getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, DEFAULT_MAX_BLOCK_SIZE) # Check that setting to legacy size is ok self.nodes[0].setexcessiveblock(LEGACY_MAX_BLOCK_SIZE + 1) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, LEGACY_MAX_BLOCK_SIZE + 1) # Check that going below legacy size is not accepted try: self.nodes[0].setexcessiveblock(LEGACY_MAX_BLOCK_SIZE) except JSONRPCException as e: assert("Invalid parameter, excessiveblock must be larger than %d" % LEGACY_MAX_BLOCK_SIZE - in e.error['message']) + in e.error['message']) else: - raise AssertionError("Must not accept excessiveblock values <= %d bytes" % LEGACY_MAX_BLOCK_SIZE) + raise AssertionError( + "Must not accept excessiveblock values <= %d bytes" % LEGACY_MAX_BLOCK_SIZE) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, LEGACY_MAX_BLOCK_SIZE + 1) # Check setting to 2MB self.nodes[0].setexcessiveblock(2 * ONE_MEGABYTE) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, 2 * ONE_MEGABYTE) # Check for EB correctness in the subver string self.check_subversion("/Bitcoin ABC:.*\(EB2\.0\)/") # Check setting to 13MB self.nodes[0].setexcessiveblock(13 * ONE_MEGABYTE) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, 13 * ONE_MEGABYTE) # Check for EB correctness in the subver string self.check_subversion("/Bitcoin ABC:.*\(EB13\.0\)/") # Check setting to 13.14MB self.nodes[0].setexcessiveblock(13140000) getsize = self.nodes[0].getexcessiveblock() ebs = getsize['excessiveBlockSize'] assert_equal(ebs, 13.14 * ONE_MEGABYTE) # check for EB correctness in the subver string self.check_subversion("/Bitcoin ABC:.*\(EB13\.1\)/") def test_uahfstarttime(self): node = self.nodes[0] + def check_uahf_starttime_equals(val): starttime_reply = node.getuahfstarttime() assert_equal(starttime_reply['uahfStartTime'], val) # Check that we start with UAHF_START_TIME check_uahf_starttime_equals(UAHF_START_TIME) # Check that setting <= 2 hours from chain tip MTP is not allowed self.tip = node.getblock(node.getbestblockhash()) tip_mtp = self.tip['mediantime'] assert(tip_mtp < UAHF_START_TIME - 7201) for offset_secs in (-1, 0, 1, 7200): try: node.setuahfstarttime(tip_mtp + offset_secs) except JSONRPCException as e: 
assert("Invalid parameter, uahfStartTime must be greater than chain tip " "MTP+2hrs (%d)" % (tip_mtp + 7200) in e.error['message']) else: - raise AssertionError("Must not accept uahfStartTime values within 2 hrs of chain tip MTP") + raise AssertionError( + "Must not accept uahfStartTime values within 2 hrs of chain tip MTP") check_uahf_starttime_equals(UAHF_START_TIME) # Check that setting to > tip MTP + 2hrs is ok node.setuahfstarttime(tip_mtp + 7200 + 1) check_uahf_starttime_equals(tip_mtp + 7200 + 1) # Activate UAHF to check that updating is no longer allowed node.setuahfstarttime(UAHF_START_TIME) check_uahf_starttime_equals(UAHF_START_TIME) # Add a block at UAHF start time to activate the fork # Since we are right on top of genesis block, it only takes one # block with the start time to get chain MTP to activation. node.generate(1) self.tip = node.getblock(node.getbestblockhash()) tip_size = self.tip['size'] # Still only waiting for the fork block at this stage. assert(tip_size <= LEGACY_MAX_BLOCK_SIZE) # Check that we are no longer allowed to update start time. def check_cannot_update_starttime(): ''' Check that setting > 2 hours from chain tip MTP is no longer allowed ''' try: node.setuahfstarttime(tip_mtp + 7200 + 1) except JSONRPCException as e: assert("UAHF already activated - disallowing start time modification" in e.error['message']) else: - raise AssertionError("Must not accept uahfStartTime modification once UAHF is activated.") + raise AssertionError( + "Must not accept uahfStartTime modification once UAHF is activated.") check_uahf_starttime_equals(UAHF_START_TIME) check_cannot_update_starttime() # Create the >1MB fork block node.generate(1) self.tip = node.getblock(node.getbestblockhash()) tip_size = self.tip['size'] assert(tip_size > LEGACY_MAX_BLOCK_SIZE) # Not allowed to update anymore. check_cannot_update_starttime() def test_cashservicebit(self): # Check that NODE_BITCOIN_CASH bit is set. # This can be seen in the 'localservices' entry of getnetworkinfo RPC. node = self.nodes[0] nw_info = node.getnetworkinfo() assert_equal(int(nw_info['localservices'], 16) & NODE_BITCOIN_CASH, NODE_BITCOIN_CASH) def run_test(self): self.test_excessiveblock() self.test_uahfstarttime() self.test_cashservicebit() if __name__ == '__main__': - ABC_RPC_Test().main () + ABC_RPC_Test().main()
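For context on the new linter: AutoPEP8FormatLinter flags a file by running autopep8, comparing its stdout against the file on disk, and proposing the reformatted text as an autofix when they differ. The stand-alone Python sketch below mirrors that check under the assumption that autopep8 is installed and on $PATH; the example.py path is only a placeholder, and none of this code is part of the change above.

import subprocess
from pathlib import Path


def needs_autopep8(path):
    # autopep8, run without --in-place, prints the reformatted source to
    # stdout and leaves the file on disk untouched.
    fixed = subprocess.run(["autopep8", path], check=True,
                           capture_output=True, text=True).stdout
    original = Path(path).read_text()
    # A style violation is reported only when reformatting would change the
    # file; identical output means it is already PEP 8 clean.
    return fixed != original


if __name__ == '__main__':
    print(needs_autopep8("example.py"))  # placeholder path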