diff --git a/.arclint b/.arclint --- a/.arclint +++ b/.arclint @@ -22,7 +22,8 @@ "include": "(\\.py$)", "exclude": [ "(^contrib/gitian-builder/)", - "(^contrib/apple-sdk-tools/)" + "(^contrib/apple-sdk-tools/)", + "(^test/functional/abc-.*\\.py$)" ], "flags": [ "--aggressive", @@ -30,6 +31,16 @@ "--max-line-length=88" ] }, + "black": { + "type": "black", + "version": ">=23.0.0", + "include": [ + "(^test/functional/abc-.*\\.py$)" + ], + "flags": [ + "--experimental-string-processing" + ] + }, "flake8": { "type": "flake8", "version": ">=5.0", @@ -39,7 +50,7 @@ "(^contrib/apple-sdk-tools/)" ], "flags": [ - "--ignore=A003,E303,E305,E501,E704,W503,W504", + "--ignore=A003,E203,E303,E305,E501,E704,W503,W504", "--require-plugins=flake8-comprehensions,flake8-builtins" ] }, diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -135,7 +135,7 @@ Install all the code formatting tools on Debian Bullseye (11) or Ubuntu 20.04: ``` sudo apt-get install python3-autopep8 python3-pip php-codesniffer shellcheck yamllint -pip3 install "isort>=5.6.4" "mypy>=0.910" "flynt>=0.78" "flake8>=5" flake8-comprehensions flake8-builtins +pip3 install "black>=23.0" "isort>=5.6.4" "mypy>=0.910" "flynt>=0.78" "flake8>=5" flake8-comprehensions flake8-builtins echo "export PATH=\"`python3 -m site --user-base`/bin:\$PATH\"" >> ~/.bashrc source ~/.bashrc ``` diff --git a/arcanist/__phutil_library_map__.php b/arcanist/__phutil_library_map__.php --- a/arcanist/__phutil_library_map__.php +++ b/arcanist/__phutil_library_map__.php @@ -15,6 +15,7 @@ 'AssertWithSideEffectsLinter' => 'linter/AssertWithSideEffectsLinter.php', 'AutoPEP8FormatLinter' => 'linter/AutoPEP8Linter.php', 'BashShebangLinter' => 'linter/BashShebangLinter.php', + 'BlackFormatLinter' => 'linter/BlackLinter.php', 'BoostDependenciesLinter' => 'linter/BoostDependenciesLinter.php', 'CHeaderLinter' => 'linter/CHeaderLinter.php', 'CheckDocLinter' => 'linter/CheckDocLinter.php', @@ -63,6 +64,7 @@ 
'AssertWithSideEffectsLinter' => 'ArcanistLinter', 'AutoPEP8FormatLinter' => 'ArcanistExternalLinter', 'BashShebangLinter' => 'ArcanistLinter', + 'BlackFormatLinter' => 'ArcanistExternalLinter', 'BoostDependenciesLinter' => 'AbstractGlobalExternalLinter', 'CHeaderLinter' => 'ArcanistLinter', 'CheckDocLinter' => 'AbstractGlobalExternalLinter', diff --git a/arcanist/linter/BlackLinter.php b/arcanist/linter/BlackLinter.php new file mode 100644 --- /dev/null +++ b/arcanist/linter/BlackLinter.php @@ -0,0 +1,112 @@ +getExecutableCommand(); + + $bin = csprintf('%C %Ls -', $executable, $this->getCommandFlags()); + + $futures = array(); + foreach ($paths as $path) { + $disk_path = $this->getEngine()->getFilePathOnDisk($path); + $future = new ExecFuture('%C', $bin); + /* Write the input file to stdin */ + $input = file_get_contents($disk_path); + $future->write($input); + $future->setCWD($this->getProjectRoot()); + $futures[$path] = $future; + } + + return $futures; + } + + public function getDefaultBinary() { + return 'black'; + } + + public function getVersion() { + list($stdout, $stderr) = execx('%C --version', + $this->getExecutableCommand()); + $matches = array(); + + /* Support a.b or a.b.c version numbering scheme */ + $regex = '/^black, (?P<version>\d+\.\d+(?:\.\d+)?)/'; + + if (preg_match($regex, $stdout, $matches)) { + return $matches['version']; + } + + return false; + } + + public function getInstallInstructions() { + return pht('pip install black'); + } + + public function shouldExpectCommandErrors() { + return false; + } + + protected function getMandatoryFlags() { + return array(); + } + + protected function parseLinterOutput($path, $err, $stdout, $stderr) { + if ($err != 0) { + return false; + } + + $root = $this->getProjectRoot(); + $path = Filesystem::resolvePath($path, $root); + $orig = file_get_contents($path); + if ($orig == $stdout) { + return array(); + } + + $message = id(new ArcanistLintMessage()) + ->setPath($path) + ->setLine(1) + ->setChar(1) + 
->setGranularity(ArcanistLinter::GRANULARITY_FILE) + ->setCode('BLACK') + ->setSeverity(ArcanistLintSeverity::SEVERITY_AUTOFIX) + ->setName('Code style violation') + ->setDescription("'$path' has code style errors.") + ->setOriginalText($orig) + ->setReplacementText($stdout); + + return array($message); + } +} diff --git a/contrib/utils/install-dependencies-bullseye.sh b/contrib/utils/install-dependencies-bullseye.sh --- a/contrib/utils/install-dependencies-bullseye.sh +++ b/contrib/utils/install-dependencies-bullseye.sh @@ -145,8 +145,8 @@ # For Chronik WebSocket endpoint pip3 install websocket-client -# Up-to-date mypy, isort and flynt packages are required python linters -pip3 install isort==5.6.4 mypy==0.910 flynt==0.78 flake8==6.0.0 +# Required python linters +pip3 install black==23.3.0 isort==5.6.4 mypy==0.910 flynt==0.78 flake8==6.0.0 echo "export PATH=\"$(python3 -m site --user-base)/bin:\$PATH\"" >> ~/.bashrc # shellcheck source=/dev/null source ~/.bashrc diff --git a/test/functional/abc-cmdline.py b/test/functional/abc-cmdline.py --- a/test/functional/abc-cmdline.py +++ b/test/functional/abc-cmdline.py @@ -19,7 +19,9 @@ from test_framework.util import assert_equal, assert_greater_than MAX_GENERATED_BLOCK_SIZE_ERROR = ( - 'Max generated block size (blockmaxsize) cannot exceed the excessive block size (excessiveblocksize)') + "Max generated block size (blockmaxsize) cannot exceed the excessive block size" + " (excessiveblocksize)" +) MAX_PCT_ADDR_TO_SEND = 23 @@ -36,8 +38,7 @@ return self.addr_count > 0 -class ABC_CmdLine_Test (BitcoinTestFramework): - +class ABC_CmdLine_Test(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 @@ -45,15 +46,17 @@ node = self.nodes[0] self.restart_node(0, extra_args=[f"-maxaddrtosend={max_addr_to_send}"]) - self.log.info(f'Testing -maxaddrtosend={max_addr_to_send}') + self.log.info(f"Testing -maxaddrtosend={max_addr_to_send}") # Fill addrman with enough entries for i in range(10000): addr = f"{(i >> 8) % 
256}.{i % 256}.1.1" node.addpeeraddress(addr, 8333) - assert_greater_than(len(node.getnodeaddresses(0)), - int(max_addr_to_send / (MAX_PCT_ADDR_TO_SEND / 100))) + assert_greater_than( + len(node.getnodeaddresses(0)), + int(max_addr_to_send / (MAX_PCT_ADDR_TO_SEND / 100)), + ) mock_time = int(time.time()) @@ -67,15 +70,15 @@ assert_equal(peer.addr_count, max_addr_to_send) def check_excessive(self, expected_value): - 'Check that the excessiveBlockSize is as expected' + """Check that the excessiveBlockSize is as expected""" getsize = self.nodes[0].getexcessiveblock() - ebs = getsize['excessiveBlockSize'] + ebs = getsize["excessiveBlockSize"] assert_equal(ebs, expected_value) def check_subversion(self, pattern_str): - 'Check that the subversion is set as expected' + """Check that the subversion is set as expected""" netinfo = self.nodes[0].getnetworkinfo() - subversion = netinfo['subversion'] + subversion = netinfo["subversion"] pattern = re.compile(pattern_str) assert pattern.match(subversion) @@ -83,7 +86,8 @@ self.log.info("Testing -excessiveblocksize") self.log.info( - f" Set to twice the default, i.e. {2 * LEGACY_MAX_BLOCK_SIZE} bytes") + f" Set to twice the default, i.e. 
{2 * LEGACY_MAX_BLOCK_SIZE} bytes" + ) self.stop_node(0) self.start_node(0, [f"-excessiveblocksize={2 * LEGACY_MAX_BLOCK_SIZE}"]) self.check_excessive(2 * LEGACY_MAX_BLOCK_SIZE) @@ -92,24 +96,34 @@ self.log.info( " Attempt to set below legacy limit of 1MB - try " - f"{LEGACY_MAX_BLOCK_SIZE} bytes") + f"{LEGACY_MAX_BLOCK_SIZE} bytes" + ) self.stop_node(0) self.nodes[0].assert_start_raises_init_error( [f"-excessiveblocksize={LEGACY_MAX_BLOCK_SIZE}"], - 'Error: Excessive block size must be > 1,000,000 bytes (1MB)') + "Error: Excessive block size must be > 1,000,000 bytes (1MB)", + ) self.nodes[0].assert_start_raises_init_error( ["-excessiveblocksize=0"], - 'Error: Excessive block size must be > 1,000,000 bytes (1MB)') + "Error: Excessive block size must be > 1,000,000 bytes (1MB)", + ) self.nodes[0].assert_start_raises_init_error( ["-excessiveblocksize=-1"], - 'Error: Excessive block size must be > 1,000,000 bytes (1MB)') + "Error: Excessive block size must be > 1,000,000 bytes (1MB)", + ) self.log.info(" Attempt to set below blockmaxsize (mining limit)") self.nodes[0].assert_start_raises_init_error( - ['-blockmaxsize=1500000', '-excessiveblocksize=1300000'], f"Error: {MAX_GENERATED_BLOCK_SIZE_ERROR}") + ["-blockmaxsize=1500000", "-excessiveblocksize=1300000"], + f"Error: {MAX_GENERATED_BLOCK_SIZE_ERROR}", + ) self.nodes[0].assert_start_raises_init_error( - ['-blockmaxsize=0'], 'Error: Max generated block size must be greater than 0') + ["-blockmaxsize=0"], + "Error: Max generated block size must be greater than 0", + ) self.nodes[0].assert_start_raises_init_error( - ['-blockmaxsize=-1'], 'Error: Max generated block size must be greater than 0') + ["-blockmaxsize=-1"], + "Error: Max generated block size must be greater than 0", + ) # Make sure we leave the test with a node running as this is what thee # framework expects. 
@@ -124,5 +138,5 @@ self.excessiveblocksize_test() -if __name__ == '__main__': +if __name__ == "__main__": ABC_CmdLine_Test().main() diff --git a/test/functional/abc-get-invalid-block.py b/test/functional/abc-get-invalid-block.py --- a/test/functional/abc-get-invalid-block.py +++ b/test/functional/abc-get-invalid-block.py @@ -52,24 +52,39 @@ peer.send_message(msg) peer.sync_with_ping() - with node.assert_debug_log(expected_msgs=["ignoring request from peer=0 for old block that isn't in the main chain"]): + with node.assert_debug_log( + expected_msgs=[ + "ignoring request from peer=0 for old block that isn't in the main" + " chain" + ] + ): msg = msg_getdata() msg.inv.append(CInv(MSG_BLOCK, block_hash)) peer.send_message(msg) peer.sync_with_ping() - with node.assert_debug_log(expected_msgs=["ignoring request from peer=0 for old block that isn't in the main chain"]): + with node.assert_debug_log( + expected_msgs=[ + "ignoring request from peer=0 for old block that isn't in the main" + " chain" + ] + ): msg = msg_getdata() msg.inv.append(CInv(MSG_CMPCT_BLOCK, block_hash)) peer.send_message(msg) peer.sync_with_ping() - with node.assert_debug_log(expected_msgs=["ignoring request from peer=0 for old block header that isn't in the main chain"]): + with node.assert_debug_log( + expected_msgs=[ + "ignoring request from peer=0 for old block header that isn't in" + " the main chain" + ] + ): msg = msg_getheaders() msg.hashstop = block_hash peer.send_message(msg) peer.sync_with_ping() -if __name__ == '__main__': +if __name__ == "__main__": GetInvalidBlockTest().main() diff --git a/test/functional/abc-invalid-chains.py b/test/functional/abc-invalid-chains.py --- a/test/functional/abc-invalid-chains.py +++ b/test/functional/abc-invalid-chains.py @@ -18,10 +18,12 @@ self.tip = None self.blocks = {} self.block_heights = {} - self.extra_args = [[ - "-whitelist=noban@127.0.0.1", - "-automaticunparking=1", - ]] + self.extra_args = [ + [ + "-whitelist=noban@127.0.0.1", + 
"-automaticunparking=1", + ] + ] def next_block(self, number): if self.tip is None: @@ -81,11 +83,21 @@ # Mining on top of blocks 1 or 2 is rejected self.set_tip(1) peer.send_blocks_and_test( - [block(11)], node, success=False, force_send=True, reject_reason='bad-prevblk') + [block(11)], + node, + success=False, + force_send=True, + reject_reason="bad-prevblk", + ) self.set_tip(2) peer.send_blocks_and_test( - [block(21)], node, success=False, force_send=True, reject_reason='bad-prevblk') + [block(21)], + node, + success=False, + force_send=True, + reject_reason="bad-prevblk", + ) # Reconsider block 2 to remove invalid status from *both* 1 and 2 # The goal is to test that block 1 is not retaining any internal state @@ -109,8 +121,10 @@ # Sanity checks assert_equal(self.blocks[24].hash, node.getbestblockhash()) - assert any(self.blocks[221].hash == chaintip["hash"] - for chaintip in node.getchaintips()) + assert any( + self.blocks[221].hash == chaintip["hash"] + for chaintip in node.getchaintips() + ) # Invalidating the block 2 chain should reject new blocks on that chain node.invalidateblock(self.blocks[2].hash) @@ -119,7 +133,12 @@ # Mining on the block 2 chain should be rejected self.set_tip(24) peer.send_blocks_and_test( - [block(25)], node, success=False, force_send=True, reject_reason='bad-prevblk') + [block(25)], + node, + success=False, + force_send=True, + reject_reason="bad-prevblk", + ) # Continued mining on the block 1 chain is still ok self.set_tip(13) @@ -129,15 +148,21 @@ # which is now invalid, should also be rejected. 
self.set_tip(221) peer.send_blocks_and_test( - [block(222)], node, success=False, force_send=True, reject_reason='bad-prevblk') + [block(222)], + node, + success=False, + force_send=True, + reject_reason="bad-prevblk", + ) self.log.info( - "Make sure that reconsidering a block behaves correctly when cousin chains (neither ancestors nor descendants) become available as a result") + "Make sure that reconsidering a block behaves correctly when cousin chains" + " (neither ancestors nor descendants) become available as a result" + ) # Reorg out 14 with four blocks. self.set_tip(13) - peer.send_blocks_and_test( - [block(15), block(16), block(17), block(18)], node) + peer.send_blocks_and_test([block(15), block(16), block(17), block(18)], node) # Invalidate 17 (so 18 now has failed parent) node.invalidateblock(self.blocks[17].hash) @@ -155,5 +180,5 @@ assert_equal(self.blocks[16].hash, node.getbestblockhash()) -if __name__ == '__main__': +if __name__ == "__main__": InvalidChainsTest().main() diff --git a/test/functional/abc-invalid-message.py b/test/functional/abc-invalid-message.py --- a/test/functional/abc-invalid-message.py +++ b/test/functional/abc-invalid-message.py @@ -30,8 +30,7 @@ class BadVersionP2PInterface(P2PInterface): - def peer_connect(self, *args, services=NODE_NETWORK, - send_version=False, **kwargs): + def peer_connect(self, *args, services=NODE_NETWORK, send_version=False, **kwargs): self.services = services return super().peer_connect(*args, send_version=send_version, **kwargs) @@ -59,15 +58,15 @@ # check happened, causing the test to fail. 
bad_interface = BadVersionP2PInterface() self.nodes[0].add_p2p_connection( - bad_interface, send_version=False, wait_for_verack=False) + bad_interface, send_version=False, wait_for_verack=False + ) # Also connect to a node with a valid version message interface = P2PInterface() # Node with valid version message should connect successfully connection = self.nodes[1].add_p2p_connection(interface) - self.log.info( - "Send an invalid version message and check we get banned") + self.log.info("Send an invalid version message and check we get banned") bad_interface.send_version() bad_interface.wait_for_disconnect() @@ -79,6 +78,7 @@ if not interface.last_message.get("pong"): return False return interface.last_message["pong"].nonce == interface.ping_counter + interface.wait_until(check_ping) interface.ping_counter += 1 @@ -95,5 +95,5 @@ interface.wait_for_disconnect() -if __name__ == '__main__': +if __name__ == "__main__": InvalidMessageTest().main() diff --git a/test/functional/abc-magnetic-anomaly-mining.py b/test/functional/abc-magnetic-anomaly-mining.py --- a/test/functional/abc-magnetic-anomaly-mining.py +++ b/test/functional/abc-magnetic-anomaly-mining.py @@ -25,7 +25,7 @@ self.blocks = {} self.mocktime = int(time.time()) - 600 * 100 - extra_arg = ['-spendzeroconfchange=0', '-whitelist=noban@127.0.0.1'] + extra_arg = ["-spendzeroconfchange=0", "-whitelist=noban@127.0.0.1"] self.extra_args = [extra_arg, extra_arg] def skip_test_if_missing_module(self): @@ -57,17 +57,13 @@ # Grab a random number of inputs for _ in range(random.randrange(1, 5)): txin = unspent.pop() - inputs.append({ - 'txid': txin['txid'], - 'vout': 0 # This is a coinbase - }) + inputs.append({"txid": txin["txid"], "vout": 0}) # This is a coinbase if len(unspent) == 0: break outputs = {} # Calculate a unique fee for this transaction - fee = decimal.Decimal(random.randint( - 1000, 2000)) / decimal.Decimal(1e2) + fee = decimal.Decimal(random.randint(1000, 2000)) / decimal.Decimal(1e2) # NOTE: There will 
be 1 sigCheck per output (which equals the number # of inputs now). We need this randomization to ensure the @@ -88,32 +84,31 @@ rawtx = mining_node.createrawtransaction(inputs, outputs) signedtx = mining_node.signrawtransactionwithwallet(rawtx) - txid = mining_node.sendrawtransaction(signedtx['hex']) + txid = mining_node.sendrawtransaction(signedtx["hex"]) # number of outputs is the same as the number of sigCheck in this # case - transactions.update( - {txid: {'fee': fee, 'sigchecks': len(outputs)}}) + transactions.update({txid: {"fee": fee, "sigchecks": len(outputs)}}) tmpl = mining_node.getblocktemplate() - assert 'proposal' in tmpl['capabilities'] + assert "proposal" in tmpl["capabilities"] # Check the template transaction metadata and ordering last_txid = 0 - for txn in tmpl['transactions'][1:]: - txid = txn['txid'] + for txn in tmpl["transactions"][1:]: + txid = txn["txid"] txnMetadata = transactions[txid] - expectedFeeSats = int(txnMetadata['fee'] * 10**2) - expectedSigChecks = txnMetadata['sigchecks'] + expectedFeeSats = int(txnMetadata["fee"] * 10**2) + expectedSigChecks = txnMetadata["sigchecks"] txid_decoded = int(txid, 16) # Assert we got the expected metadata - assert expectedFeeSats == txn['fee'] - assert expectedSigChecks == txn['sigchecks'] + assert expectedFeeSats == txn["fee"] + assert expectedSigChecks == txn["sigchecks"] # Assert transaction ids are in order assert last_txid == 0 or last_txid < txid_decoded last_txid = txid_decoded -if __name__ == '__main__': +if __name__ == "__main__": CTORMiningTest().main() diff --git a/test/functional/abc-mempool-coherence-on-activations.py b/test/functional/abc-mempool-coherence-on-activations.py --- a/test/functional/abc-mempool-coherence-on-activations.py +++ b/test/functional/abc-mempool-coherence-on-activations.py @@ -50,7 +50,10 @@ FIRST_BLOCK_TIME = ACTIVATION_TIME - 86400 # Expected RPC error when trying to send an activation specific spend txn. 
-RPC_EXPECTED_ERROR = "mandatory-script-verify-flag-failed (Signature must be zero for failed CHECK(MULTI)SIG operation)" +RPC_EXPECTED_ERROR = ( + "mandatory-script-verify-flag-failed (Signature must be zero for failed" + " CHECK(MULTI)SIG operation)" +) def create_fund_and_activation_specific_spending_tx(spend, pre_fork_only): @@ -71,23 +74,24 @@ # Fund transaction script = CScript([public_key, OP_CHECKSIG]) txfund = create_tx_with_script( - spend.tx, spend.n, b'', amount=50 * COIN, script_pub_key=script) + spend.tx, spend.n, b"", amount=50 * COIN, script_pub_key=script + ) txfund.rehash() # Activation specific spending tx txspend = CTransaction() txspend.vout.append(CTxOut(50 * COIN - 1000, CScript([OP_TRUE]))) - txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b'')) + txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b"")) # Sign the transaction # Use forkvalues that create pre-fork-only or post-fork-only # transactions. - forkvalue = 0 if pre_fork_only else 0xffdead + forkvalue = 0 if pre_fork_only else 0xFFDEAD sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID - sighash = SignatureHashForkId( - script, txspend, 0, sighashtype, 50 * COIN) - sig = private_key.sign_ecdsa(sighash) + \ - bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID])) + sighash = SignatureHashForkId(script, txspend, 0, sighashtype, 50 * COIN) + sig = private_key.sign_ecdsa(sighash) + bytes( + bytearray([SIGHASH_ALL | SIGHASH_FORKID]) + ) txspend.vin[0].scriptSig = CScript([sig]) txspend.rehash() @@ -95,39 +99,37 @@ def create_fund_and_pre_fork_only_tx(spend): - return create_fund_and_activation_specific_spending_tx( - spend, pre_fork_only=True) + return create_fund_and_activation_specific_spending_tx(spend, pre_fork_only=True) def create_fund_and_post_fork_only_tx(spend): - return create_fund_and_activation_specific_spending_tx( - spend, pre_fork_only=False) + return create_fund_and_activation_specific_spending_tx(spend, pre_fork_only=False) # ---Mempool coherence on 
activations test--- class PreviousSpendableOutput(object): - def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n class MempoolCoherenceOnActivationsTest(BitcoinTestFramework): - def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.block_heights = {} self.tip = None self.blocks = {} - self.extra_args = [[ - '-whitelist=noban@127.0.0.1', - EXTRA_ARG, - '-acceptnonstdtxn=1', - '-automaticunparking=1', - ]] + self.extra_args = [ + [ + "-whitelist=noban@127.0.0.1", + EXTRA_ARG, + "-acceptnonstdtxn=1", + "-automaticunparking=1", + ] + ] def next_block(self, number): if self.tip is None: @@ -178,8 +180,7 @@ # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: - self.block_heights[ - block.sha256] = self.block_heights[old_sha256] + self.block_heights[block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block @@ -198,7 +199,12 @@ # spendable output for further chaining. def create_always_valid_chained_tx(spend): tx = create_tx_with_script( - spend.tx, spend.n, b'', amount=spend.tx.vout[0].nValue - 1000, script_pub_key=CScript([OP_TRUE])) + spend.tx, + spend.n, + b"", + amount=spend.tx.vout[0].nValue - 1000, + script_pub_key=CScript([OP_TRUE]), + ) tx.rehash() return tx, PreviousSpendableOutput(tx, 0) @@ -243,9 +249,7 @@ peer.send_blocks_and_test([block(5200 + i)], node) # Check we are just before the activation time - assert_equal( - node.getblockchaininfo()['mediantime'], - ACTIVATION_TIME - 1) + assert_equal(node.getblockchaininfo()["mediantime"], ACTIVATION_TIME - 1) # We are just before the fork. 
Pre-fork-only and always-valid chained # txns (tx_chain0, tx_chain1) are valid, post-fork-only txns are @@ -254,13 +258,16 @@ send_transaction_to_mempool(tx_pre1) tx_chain0, last_chained_output = create_always_valid_chained_tx(out[4]) tx_chain1, last_chained_output = create_always_valid_chained_tx( - last_chained_output) + last_chained_output + ) send_transaction_to_mempool(tx_chain0) send_transaction_to_mempool(tx_chain1) - assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR, - node.sendrawtransaction, ToHex(tx_post0)) - assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR, - node.sendrawtransaction, ToHex(tx_post1)) + assert_raises_rpc_error( + -26, RPC_EXPECTED_ERROR, node.sendrawtransaction, ToHex(tx_post0) + ) + assert_raises_rpc_error( + -26, RPC_EXPECTED_ERROR, node.sendrawtransaction, ToHex(tx_post1) + ) check_mempool_equal([tx_chain0, tx_chain1, tx_pre0, tx_pre1]) # Activate the fork. Mine the 1st always-valid chained txn and a @@ -271,8 +278,7 @@ forkblockid = node.getbestblockhash() # Check we just activated the fork - assert_equal(node.getblockheader(forkblockid)['mediantime'], - ACTIVATION_TIME) + assert_equal(node.getblockheader(forkblockid)["mediantime"], ACTIVATION_TIME) # Check mempool coherence when activating the fork. Pre-fork-only txns # were evicted from the mempool, while always-valid txns remain. @@ -285,8 +291,9 @@ send_transaction_to_mempool(tx_post1) tx_chain2, _ = create_always_valid_chained_tx(last_chained_output) send_transaction_to_mempool(tx_chain2) - assert_raises_rpc_error(-26, RPC_EXPECTED_ERROR, - node.sendrawtransaction, ToHex(tx_pre1)) + assert_raises_rpc_error( + -26, RPC_EXPECTED_ERROR, node.sendrawtransaction, ToHex(tx_pre1) + ) check_mempool_equal([tx_chain1, tx_chain2, tx_post0, tx_post1]) # Mine the 2nd always-valid chained txn and a post-fork-only txn. 
@@ -353,17 +360,14 @@ # Perform the reorg peer.send_blocks_and_test(reorg_blocks, node) # reorg finishes after the fork - assert_equal( - node.getblockchaininfo()['mediantime'], - ACTIVATION_TIME + 2) + assert_equal(node.getblockchaininfo()["mediantime"], ACTIVATION_TIME + 2) # In old mempool: tx_chain2, tx_post1 # Recovered from blocks: tx_chain0, tx_chain1, tx_post0 # Lost from blocks: tx_pre0 # Retained from old mempool: tx_chain2, tx_post1 # Evicted from old mempool: NONE - check_mempool_equal( - [tx_chain0, tx_chain1, tx_chain2, tx_post0, tx_post1]) + check_mempool_equal([tx_chain0, tx_chain1, tx_chain2, tx_post0, tx_post1]) -if __name__ == '__main__': +if __name__ == "__main__": MempoolCoherenceOnActivationsTest().main() diff --git a/test/functional/abc-minimaldata.py b/test/functional/abc-minimaldata.py --- a/test/functional/abc-minimaldata.py +++ b/test/functional/abc-minimaldata.py @@ -34,18 +34,19 @@ from test_framework.util import assert_raises_rpc_error # Minimal push violations in mempool are rejected with a bannable error. -MINIMALPUSH_ERROR = 'mandatory-script-verify-flag-failed (Data push larger than necessary)' +MINIMALPUSH_ERROR = ( + "mandatory-script-verify-flag-failed (Data push larger than necessary)" +) # Blocks with invalid scripts give this error: -BADINPUTS_ERROR = 'blk-bad-inputs' +BADINPUTS_ERROR = "blk-bad-inputs" class MinimaldataTest(BitcoinTestFramework): - def set_test_params(self): self.num_nodes = 1 self.block_heights = {} - self.extra_args = [['-acceptnonstdtxn=1']] + self.extra_args = [["-acceptnonstdtxn=1"]] def reconnect_p2p(self): """Tear down and bootstrap the P2P connection to the node. 
@@ -72,8 +73,7 @@ block_height = self.block_heights[parent.sha256] + 1 block_time = (parent.nTime + 1) if nTime is None else nTime - block = create_block( - parent.sha256, create_coinbase(block_height), block_time) + block = create_block(parent.sha256, create_coinbase(block_height), block_time) block.vtx.extend(transactions) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() @@ -86,7 +86,12 @@ (Can't actually get banned, since bitcoind won't ban local peers.)""" self.nodes[0].p2ps[0].send_txs_and_test( - [tx], self.nodes[0], success=False, expect_disconnect=True, reject_reason=reject_reason) + [tx], + self.nodes[0], + success=False, + expect_disconnect=True, + reject_reason=reject_reason, + ) self.reconnect_p2p() def check_for_ban_on_rejected_block(self, block, reject_reason=None): @@ -94,11 +99,16 @@ (Can't actually get banned, since bitcoind won't ban local peers.)""" self.nodes[0].p2ps[0].send_blocks_and_test( - [block], self.nodes[0], success=False, reject_reason=reject_reason, expect_disconnect=True) + [block], + self.nodes[0], + success=False, + reject_reason=reject_reason, + expect_disconnect=True, + ) self.reconnect_p2p() def run_test(self): - node, = self.nodes + (node,) = self.nodes self.nodes[0].add_p2p_connection(P2PDataStore()) @@ -129,20 +139,18 @@ # Fund transaction txfund = create_tx_with_script( - spendfrom, 0, b'', amount=value, script_pub_key=script) + spendfrom, 0, b"", amount=value, script_pub_key=script + ) txfund.rehash() fundings.append(txfund) # Spend transaction txspend = CTransaction() - txspend.vout.append( - CTxOut(value - 1000, CScript([OP_TRUE]))) - txspend.vin.append( - CTxIn(COutPoint(txfund.sha256, 0), b'')) + txspend.vout.append(CTxOut(value - 1000, CScript([OP_TRUE]))) + txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b"")) # Sign the transaction - txspend.vin[0].scriptSig = CScript( - b'\x01\x01\x51') # PUSH1(0x01) OP_1 + txspend.vin[0].scriptSig = CScript(b"\x01\x01\x51") # PUSH1(0x01) OP_1 
pad_tx(txspend) txspend.rehash() @@ -156,18 +164,18 @@ self.log.info("Trying to mine a minimaldata violation.") self.check_for_ban_on_rejected_block( - self.build_block(tip, [nonminimaltx]), BADINPUTS_ERROR) - self.log.info( - "If we try to submit it by mempool or RPC we are banned") - assert_raises_rpc_error(-26, MINIMALPUSH_ERROR, - node.sendrawtransaction, ToHex(nonminimaltx)) - self.check_for_ban_on_rejected_tx( - nonminimaltx, MINIMALPUSH_ERROR) + self.build_block(tip, [nonminimaltx]), BADINPUTS_ERROR + ) + self.log.info("If we try to submit it by mempool or RPC we are banned") + assert_raises_rpc_error( + -26, MINIMALPUSH_ERROR, node.sendrawtransaction, ToHex(nonminimaltx) + ) + self.check_for_ban_on_rejected_tx(nonminimaltx, MINIMALPUSH_ERROR) self.log.info("Mine a normal block") tip = self.build_block(tip) node.p2ps[0].send_blocks_and_test([tip], node) -if __name__ == '__main__': +if __name__ == "__main__": MinimaldataTest().main() diff --git a/test/functional/abc-parkedchain.py b/test/functional/abc-parkedchain.py --- a/test/functional/abc-parkedchain.py +++ b/test/functional/abc-parkedchain.py @@ -19,7 +19,7 @@ ], [ "-automaticunparking=1", - ] + ], ] def skip_test_if_missing_module(self): @@ -39,6 +39,7 @@ def wait_for_tip(node, tip): def check_tip(): return node.getbestblockhash() == tip + self.wait_until(check_tip) node = self.nodes[0] @@ -154,27 +155,27 @@ assert tip["status"] != "active" return tip["status"] == "parked" return False + self.wait_until(check_block) def check_reorg_protection(depth, extra_blocks): - self.log.info( - f"Test deep reorg parking, {depth} block deep") + self.log.info(f"Test deep reorg parking, {depth} block deep") # Invalidate the tip on node 0, so it doesn't follow node 1. 
node.invalidateblock(node.getbestblockhash()) # Mine block to create a fork of proper depth - self.generatetoaddress(parking_node, - nblocks=depth - 1, - address=parking_node.getnewaddress( - label='coinbase'), - sync_fun=self.no_op, - ) - self.generatetoaddress(node, - nblocks=depth, - address=node.getnewaddress( - label='coinbase'), - sync_fun=self.no_op, - ) + self.generatetoaddress( + parking_node, + nblocks=depth - 1, + address=parking_node.getnewaddress(label="coinbase"), + sync_fun=self.no_op, + ) + self.generatetoaddress( + node, + nblocks=depth, + address=node.getnewaddress(label="coinbase"), + sync_fun=self.no_op, + ) # extra block should now find themselves parked for _ in range(extra_blocks): self.generate(node, 1, sync_fun=self.no_op) @@ -197,16 +198,18 @@ check_reorg_protection(3, 1) self.log.info( - "Accepting many blocks at once (possibly out of order) should not park if there is no reorg.") + "Accepting many blocks at once (possibly out of order) should not park if" + " there is no reorg." + ) # rewind one block to make a reorg that is shallow. node.invalidateblock(parking_node.getbestblockhash()) # generate a ton of blocks at once. 
try: with parking_node.assert_debug_log(["Park block"]): # Also waits for chain sync - self.generatetoaddress(node, - nblocks=20, - address=node.getnewaddress(label='coinbase')) + self.generatetoaddress( + node, nblocks=20, address=node.getnewaddress(label="coinbase") + ) except AssertionError as exc: # good, we want an absence of "Park block" messages assert "does not partially match log" in exc.args[0] @@ -217,11 +220,12 @@ # Set up parking node height = fork + 4, node height = fork + 5 node.invalidateblock(node.getbestblockhash()) self.generate(parking_node, 3, sync_fun=self.no_op) - self.generatetoaddress(node, - nblocks=5, - address=node.getnewaddress(label='coinbase'), - sync_fun=self.no_op, - ) + self.generatetoaddress( + node, + nblocks=5, + address=node.getnewaddress(label="coinbase"), + sync_fun=self.no_op, + ) wait_for_parked_block(node.getbestblockhash()) # Restart the parking node without parkdeepreorg. self.restart_node(1, self.extra_args[1] + ["-parkdeepreorg=0"]) @@ -241,5 +245,5 @@ # Parking node is no longer parking. 
-if __name__ == '__main__': +if __name__ == "__main__": ParkedChainTest().main() diff --git a/test/functional/abc-replay-protection.py b/test/functional/abc-replay-protection.py --- a/test/functional/abc-replay-protection.py +++ b/test/functional/abc-replay-protection.py @@ -35,27 +35,32 @@ REPLAY_PROTECTION_START_TIME = 2000000000 # Error due to invalid signature -RPC_INVALID_SIGNATURE_ERROR = "mandatory-script-verify-flag-failed (Signature must be zero for failed CHECK(MULTI)SIG operation)" +RPC_INVALID_SIGNATURE_ERROR = ( + "mandatory-script-verify-flag-failed (Signature must be zero for failed" + " CHECK(MULTI)SIG operation)" +) class PreviousSpendableOutput(object): - def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n class ReplayProtectionTest(BitcoinTestFramework): - def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.block_heights = {} self.tip = None self.blocks = {} - self.extra_args = [['-whitelist=noban@127.0.0.1', - f"-replayprotectionactivationtime={REPLAY_PROTECTION_START_TIME}", - "-acceptnonstdtxn=1"]] + self.extra_args = [ + [ + "-whitelist=noban@127.0.0.1", + f"-replayprotectionactivationtime={REPLAY_PROTECTION_START_TIME}", + "-acceptnonstdtxn=1", + ] + ] def next_block(self, number): if self.tip is None: @@ -111,8 +116,7 @@ # Update the internal state just like in next_block self.tip = block if block.sha256 != old_sha256: - self.block_heights[ - block.sha256] = self.block_heights[old_sha256] + self.block_heights[block.sha256] = self.block_heights[old_sha256] del self.block_heights[old_sha256] self.blocks[block_number] = block return block @@ -148,20 +152,23 @@ # Fund transaction script = CScript([public_key, OP_CHECKSIG]) txfund = create_tx_with_script( - spend.tx, spend.n, b'', amount=50 * COIN - 1000, script_pub_key=script) + spend.tx, spend.n, b"", amount=50 * COIN - 1000, script_pub_key=script + ) txfund.rehash() # Spend transaction txspend = CTransaction() txspend.vout.append(CTxOut(50 * COIN 
- 2000, CScript([OP_TRUE]))) - txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b'')) + txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b"")) # Sign the transaction sighashtype = (forkvalue << 8) | SIGHASH_ALL | SIGHASH_FORKID sighash = SignatureHashForkId( - script, txspend, 0, sighashtype, 50 * COIN - 1000) - sig = private_key.sign_ecdsa(sighash) + \ - bytes(bytearray([SIGHASH_ALL | SIGHASH_FORKID])) + script, txspend, 0, sighashtype, 50 * COIN - 1000 + ) + sig = private_key.sign_ecdsa(sighash) + bytes( + bytearray([SIGHASH_ALL | SIGHASH_FORKID]) + ) txspend.vin[0].scriptSig = CScript([sig]) txspend.rehash() @@ -183,16 +190,21 @@ peer.send_blocks_and_test([self.tip], node) # Replay protected transactions are rejected. - replay_txns = create_fund_and_spend_tx(out[1], 0xffdead) + replay_txns = create_fund_and_spend_tx(out[1], 0xFFDEAD) send_transaction_to_mempool(replay_txns[0]) - assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR, - node.sendrawtransaction, ToHex(replay_txns[1])) + assert_raises_rpc_error( + -26, + RPC_INVALID_SIGNATURE_ERROR, + node.sendrawtransaction, + ToHex(replay_txns[1]), + ) # And block containing them are rejected as well. 
block(2) update_block(2, replay_txns) peer.send_blocks_and_test( - [self.tip], node, success=False, reject_reason='blk-bad-inputs') + [self.tip], node, success=False, reject_reason="blk-bad-inputs" + ) # Rewind bad block self.set_tip(1) @@ -211,17 +223,22 @@ # Check we are just before the activation time assert_equal( - node.getblockchaininfo()['mediantime'], - REPLAY_PROTECTION_START_TIME - 1) + node.getblockchaininfo()["mediantime"], REPLAY_PROTECTION_START_TIME - 1 + ) # We are just before the fork, replay protected txns still are rejected - assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR, - node.sendrawtransaction, ToHex(replay_txns[1])) + assert_raises_rpc_error( + -26, + RPC_INVALID_SIGNATURE_ERROR, + node.sendrawtransaction, + ToHex(replay_txns[1]), + ) block(3) update_block(3, replay_txns) peer.send_blocks_and_test( - [self.tip], node, success=False, reject_reason='blk-bad-inputs') + [self.tip], node, success=False, reject_reason="blk-bad-inputs" + ) # Rewind bad block self.set_tip(5104) @@ -238,8 +255,8 @@ # Check we just activated the replay protection assert_equal( - node.getblockchaininfo()['mediantime'], - REPLAY_PROTECTION_START_TIME) + node.getblockchaininfo()["mediantime"], REPLAY_PROTECTION_START_TIME + ) # Non replay protected transactions are not valid anymore, # so they should be removed from the mempool. @@ -247,14 +264,16 @@ # Good old transactions are now invalid. 
send_transaction_to_mempool(txns[0]) - assert_raises_rpc_error(-26, RPC_INVALID_SIGNATURE_ERROR, - node.sendrawtransaction, ToHex(txns[1])) + assert_raises_rpc_error( + -26, RPC_INVALID_SIGNATURE_ERROR, node.sendrawtransaction, ToHex(txns[1]) + ) # They also cannot be mined block(4) update_block(4, txns) peer.send_blocks_and_test( - [self.tip], node, success=False, reject_reason='blk-bad-inputs') + [self.tip], node, success=False, reject_reason="blk-bad-inputs" + ) # Rewind bad block self.set_tip(5556) @@ -269,8 +288,8 @@ found_id0 = False found_id1 = False - for txn in tmpl['transactions']: - txid = txn['txid'] + for txn in tmpl["transactions"]: + txid = txn["txid"] if txid == replay_tx0_id: found_id0 = True elif txid == replay_tx1_id: @@ -309,5 +328,5 @@ assert replay_tx1_id not in set(node.getrawmempool()) -if __name__ == '__main__': +if __name__ == "__main__": ReplayProtectionTest().main() diff --git a/test/functional/abc-schnorr.py b/test/functional/abc-schnorr.py --- a/test/functional/abc-schnorr.py +++ b/test/functional/abc-schnorr.py @@ -42,29 +42,33 @@ # A mandatory (bannable) error occurs when people pass Schnorr signatures # into OP_CHECKMULTISIG. -SCHNORR_MULTISIG_ERROR = 'mandatory-script-verify-flag-failed (Signature cannot be 65 bytes in CHECKMULTISIG)' +SCHNORR_MULTISIG_ERROR = ( + "mandatory-script-verify-flag-failed (Signature cannot be 65 bytes in" + " CHECKMULTISIG)" +) # A mandatory (bannable) error occurs when people send invalid Schnorr # sigs into OP_CHECKSIG. -NULLFAIL_ERROR = 'mandatory-script-verify-flag-failed (Signature must be zero for failed CHECK(MULTI)SIG operation)' +NULLFAIL_ERROR = ( + "mandatory-script-verify-flag-failed (Signature must be zero for failed" + " CHECK(MULTI)SIG operation)" +) # Blocks with invalid scripts give this error: -BADINPUTS_ERROR = 'blk-bad-inputs' +BADINPUTS_ERROR = "blk-bad-inputs" # This 64-byte signature is used to test exclusion & banning according to # the above error messages. 
# Tests of real 64 byte ECDSA signatures can be found in script_tests. -sig64 = b'\0' * 64 +sig64 = b"\0" * 64 class SchnorrTest(BitcoinTestFramework): - def set_test_params(self): self.num_nodes = 1 self.block_heights = {} - self.extra_args = [[ - "-acceptnonstdtxn=1"]] + self.extra_args = [["-acceptnonstdtxn=1"]] def reconnect_p2p(self): """Tear down and bootstrap the P2P connection to the node. @@ -91,8 +95,7 @@ block_height = self.block_heights[parent.sha256] + 1 block_time = (parent.nTime + 1) if nTime is None else nTime - block = create_block( - parent.sha256, create_coinbase(block_height), block_time) + block = create_block(parent.sha256, create_coinbase(block_height), block_time) block.vtx.extend(transactions) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() @@ -105,7 +108,12 @@ (Can't actually get banned, since bitcoind won't ban local peers.)""" self.nodes[0].p2ps[0].send_txs_and_test( - [tx], self.nodes[0], success=False, reject_reason=reject_reason, expect_disconnect=True) + [tx], + self.nodes[0], + success=False, + reject_reason=reject_reason, + expect_disconnect=True, + ) self.reconnect_p2p() def check_for_ban_on_rejected_block(self, block, reject_reason=None): @@ -113,11 +121,16 @@ (Can't actually get banned, since bitcoind won't ban local peers.)""" self.nodes[0].p2ps[0].send_blocks_and_test( - [block], self.nodes[0], success=False, reject_reason=reject_reason, expect_disconnect=True) + [block], + self.nodes[0], + success=False, + reject_reason=reject_reason, + expect_disconnect=True, + ) self.reconnect_p2p() def run_test(self): - node, = self.nodes + (node,) = self.nodes self.nodes[0].add_p2p_connection(P2PDataStore()) @@ -145,7 +158,7 @@ # get uncompressed public key serialization public_key = private_key.get_pubkey().get_bytes() - def create_fund_and_spend_tx(multi=False, sig='schnorr'): + def create_fund_and_spend_tx(multi=False, sig="schnorr"): spendfrom = spendable_outputs.pop() if multi: @@ -157,30 +170,28 @@ # Fund 
transaction txfund = create_tx_with_script( - spendfrom, 0, b'', amount=value, script_pub_key=script) + spendfrom, 0, b"", amount=value, script_pub_key=script + ) txfund.rehash() fundings.append(txfund) # Spend transaction txspend = CTransaction() - txspend.vout.append( - CTxOut(value - 1000, CScript([OP_TRUE]))) - txspend.vin.append( - CTxIn(COutPoint(txfund.sha256, 0), b'')) + txspend.vout.append(CTxOut(value - 1000, CScript([OP_TRUE]))) + txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b"")) # Sign the transaction sighashtype = SIGHASH_ALL | SIGHASH_FORKID - hashbyte = bytes([sighashtype & 0xff]) - sighash = SignatureHashForkId( - script, txspend, 0, sighashtype, value) - if sig == 'schnorr': + hashbyte = bytes([sighashtype & 0xFF]) + sighash = SignatureHashForkId(script, txspend, 0, sighashtype, value) + if sig == "schnorr": txsig = private_key.sign_schnorr(sighash) + hashbyte - elif sig == 'ecdsa': + elif sig == "ecdsa": txsig = private_key.sign_ecdsa(sighash) + hashbyte elif isinstance(sig, bytes): txsig = sig + hashbyte if multi: - txspend.vin[0].scriptSig = CScript([b'', txsig]) + txspend.vin[0].scriptSig = CScript([b"", txsig]) else: txspend.vin[0].scriptSig = CScript([txsig]) txspend.rehash() @@ -189,7 +200,7 @@ schnorrchecksigtx = create_fund_and_spend_tx() schnorrmultisigtx = create_fund_and_spend_tx(multi=True) - ecdsachecksigtx = create_fund_and_spend_tx(sig='ecdsa') + ecdsachecksigtx = create_fund_and_spend_tx(sig="ecdsa") sig64checksigtx = create_fund_and_spend_tx(sig=sig64) sig64multisigtx = create_fund_and_spend_tx(multi=True, sig=sig64) @@ -197,8 +208,7 @@ node.p2ps[0].send_blocks_and_test([tip], node) self.log.info("Typical ECDSA and Schnorr CHECKSIG are valid.") - node.p2ps[0].send_txs_and_test( - [schnorrchecksigtx, ecdsachecksigtx], node) + node.p2ps[0].send_txs_and_test([schnorrchecksigtx, ecdsachecksigtx], node) # They get mined as usual. 
self.generate(node, 1, sync_fun=self.no_op) tip = self.getbestblock(node) @@ -208,32 +218,39 @@ assert not node.getrawmempool() self.log.info("Schnorr in multisig is rejected with mandatory error.") - assert_raises_rpc_error(-26, SCHNORR_MULTISIG_ERROR, - node.sendrawtransaction, ToHex(schnorrmultisigtx)) + assert_raises_rpc_error( + -26, + SCHNORR_MULTISIG_ERROR, + node.sendrawtransaction, + ToHex(schnorrmultisigtx), + ) # And it is banworthy. - self.check_for_ban_on_rejected_tx( - schnorrmultisigtx, SCHNORR_MULTISIG_ERROR) + self.check_for_ban_on_rejected_tx(schnorrmultisigtx, SCHNORR_MULTISIG_ERROR) # And it can't be mined self.check_for_ban_on_rejected_block( - self.build_block(tip, [schnorrmultisigtx]), BADINPUTS_ERROR) + self.build_block(tip, [schnorrmultisigtx]), BADINPUTS_ERROR + ) self.log.info("Bad 64-byte sig is rejected with mandatory error.") # In CHECKSIG it's invalid Schnorr and hence NULLFAIL. - assert_raises_rpc_error(-26, NULLFAIL_ERROR, - node.sendrawtransaction, ToHex(sig64checksigtx)) + assert_raises_rpc_error( + -26, NULLFAIL_ERROR, node.sendrawtransaction, ToHex(sig64checksigtx) + ) # In CHECKMULTISIG it's invalid length and hence BAD_LENGTH. - assert_raises_rpc_error(-26, SCHNORR_MULTISIG_ERROR, - node.sendrawtransaction, ToHex(sig64multisigtx)) + assert_raises_rpc_error( + -26, SCHNORR_MULTISIG_ERROR, node.sendrawtransaction, ToHex(sig64multisigtx) + ) # Sending these transactions is banworthy. self.check_for_ban_on_rejected_tx(sig64checksigtx, NULLFAIL_ERROR) - self.check_for_ban_on_rejected_tx( - sig64multisigtx, SCHNORR_MULTISIG_ERROR) + self.check_for_ban_on_rejected_tx(sig64multisigtx, SCHNORR_MULTISIG_ERROR) # And they can't be mined either... 
self.check_for_ban_on_rejected_block( - self.build_block(tip, [sig64checksigtx]), BADINPUTS_ERROR) + self.build_block(tip, [sig64checksigtx]), BADINPUTS_ERROR + ) self.check_for_ban_on_rejected_block( - self.build_block(tip, [sig64multisigtx]), BADINPUTS_ERROR) + self.build_block(tip, [sig64multisigtx]), BADINPUTS_ERROR + ) -if __name__ == '__main__': +if __name__ == "__main__": SchnorrTest().main() diff --git a/test/functional/abc-schnorrmultisig.py b/test/functional/abc-schnorrmultisig.py --- a/test/functional/abc-schnorrmultisig.py +++ b/test/functional/abc-schnorrmultisig.py @@ -45,18 +45,23 @@ # ECDSA checkmultisig with non-null dummy are invalid since the new mode # refuses ECDSA. -ECDSA_NULLDUMMY_ERROR = 'mandatory-script-verify-flag-failed (Only Schnorr signatures allowed in this operation)' +ECDSA_NULLDUMMY_ERROR = ( + "mandatory-script-verify-flag-failed (Only Schnorr signatures allowed in this" + " operation)" +) # A mandatory (bannable) error occurs when people pass Schnorr signatures into # legacy OP_CHECKMULTISIG. 
-SCHNORR_LEGACY_MULTISIG_ERROR = 'mandatory-script-verify-flag-failed (Signature cannot be 65 bytes in CHECKMULTISIG)' +SCHNORR_LEGACY_MULTISIG_ERROR = ( + "mandatory-script-verify-flag-failed (Signature cannot be 65 bytes in" + " CHECKMULTISIG)" +) # Blocks with invalid scripts give this error: -BADINPUTS_ERROR = 'blk-bad-inputs' +BADINPUTS_ERROR = "blk-bad-inputs" class SchnorrMultisigTest(BitcoinTestFramework): - def set_test_params(self): self.num_nodes = 1 self.block_heights = {} @@ -87,8 +92,7 @@ block_height = self.block_heights[parent.sha256] + 1 block_time = (parent.nTime + 1) if nTime is None else nTime - block = create_block( - parent.sha256, create_coinbase(block_height), block_time) + block = create_block(parent.sha256, create_coinbase(block_height), block_time) block.vtx.extend(transactions) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() @@ -101,7 +105,12 @@ (Can't actually get banned, since bitcoind won't ban local peers.)""" self.nodes[0].p2ps[0].send_txs_and_test( - [tx], self.nodes[0], success=False, expect_disconnect=True, reject_reason=reject_reason) + [tx], + self.nodes[0], + success=False, + expect_disconnect=True, + reject_reason=reject_reason, + ) self.reconnect_p2p() def check_for_ban_on_rejected_block(self, block, reject_reason=None): @@ -109,11 +118,16 @@ (Can't actually get banned, since bitcoind won't ban local peers.)""" self.nodes[0].p2ps[0].send_blocks_and_test( - [block], self.nodes[0], success=False, reject_reason=reject_reason, expect_disconnect=True) + [block], + self.nodes[0], + success=False, + reject_reason=reject_reason, + expect_disconnect=True, + ) self.reconnect_p2p() def run_test(self): - node, = self.nodes + (node,) = self.nodes self.nodes[0].add_p2p_connection(P2PDataStore()) @@ -141,7 +155,7 @@ # get uncompressed public key serialization public_key = private_key.get_pubkey().get_bytes() - def create_fund_and_spend_tx(dummy=OP_0, sigtype='ecdsa'): + def create_fund_and_spend_tx(dummy=OP_0, 
sigtype="ecdsa"): spendfrom = spendable_outputs.pop() script = CScript([OP_1, public_key, OP_1, OP_CHECKMULTISIG]) @@ -150,25 +164,23 @@ # Fund transaction txfund = create_tx_with_script( - spendfrom, 0, b'', amount=value, script_pub_key=script) + spendfrom, 0, b"", amount=value, script_pub_key=script + ) txfund.rehash() fundings.append(txfund) # Spend transaction txspend = CTransaction() - txspend.vout.append( - CTxOut(value - 1000, CScript([OP_TRUE]))) - txspend.vin.append( - CTxIn(COutPoint(txfund.sha256, 0), b'')) + txspend.vout.append(CTxOut(value - 1000, CScript([OP_TRUE]))) + txspend.vin.append(CTxIn(COutPoint(txfund.sha256, 0), b"")) # Sign the transaction sighashtype = SIGHASH_ALL | SIGHASH_FORKID - hashbyte = bytes([sighashtype & 0xff]) - sighash = SignatureHashForkId( - script, txspend, 0, sighashtype, value) - if sigtype == 'schnorr': + hashbyte = bytes([sighashtype & 0xFF]) + sighash = SignatureHashForkId(script, txspend, 0, sighashtype, value) + if sigtype == "schnorr": txsig = private_key.sign_schnorr(sighash) + hashbyte - elif sigtype == 'ecdsa': + elif sigtype == "ecdsa": txsig = private_key.sign_ecdsa(sighash) + hashbyte txspend.vin[0].scriptSig = CScript([dummy, txsig]) txspend.rehash() @@ -176,16 +188,16 @@ return txspend # This is valid. - ecdsa0tx = create_fund_and_spend_tx(OP_0, 'ecdsa') + ecdsa0tx = create_fund_and_spend_tx(OP_0, "ecdsa") # This is invalid. - ecdsa1tx = create_fund_and_spend_tx(OP_1, 'ecdsa') + ecdsa1tx = create_fund_and_spend_tx(OP_1, "ecdsa") # This is invalid. - schnorr0tx = create_fund_and_spend_tx(OP_0, 'schnorr') + schnorr0tx = create_fund_and_spend_tx(OP_0, "schnorr") # This is valid. 
- schnorr1tx = create_fund_and_spend_tx(OP_1, 'schnorr') + schnorr1tx = create_fund_and_spend_tx(OP_1, "schnorr") tip = self.build_block(tip, fundings) node.p2ps[0].send_blocks_and_test([tip], node) @@ -196,36 +208,34 @@ self.log.info("Trying to mine a non-null-dummy ECDSA.") self.check_for_ban_on_rejected_block( - self.build_block(tip, [ecdsa1tx]), BADINPUTS_ERROR) + self.build_block(tip, [ecdsa1tx]), BADINPUTS_ERROR + ) self.log.info( - "If we try to submit it by mempool or RPC, it is rejected and we are banned") - assert_raises_rpc_error(-26, ECDSA_NULLDUMMY_ERROR, - node.sendrawtransaction, ToHex(ecdsa1tx)) - self.check_for_ban_on_rejected_tx( - ecdsa1tx, ECDSA_NULLDUMMY_ERROR) - - self.log.info( - "Submitting a Schnorr-multisig via net, and mining it in a block") + "If we try to submit it by mempool or RPC, it is rejected and we are banned" + ) + assert_raises_rpc_error( + -26, ECDSA_NULLDUMMY_ERROR, node.sendrawtransaction, ToHex(ecdsa1tx) + ) + self.check_for_ban_on_rejected_tx(ecdsa1tx, ECDSA_NULLDUMMY_ERROR) + + self.log.info("Submitting a Schnorr-multisig via net, and mining it in a block") node.p2ps[0].send_txs_and_test([schnorr1tx], node) - assert_equal(set(node.getrawmempool()), { - ecdsa0tx.hash, schnorr1tx.hash}) + assert_equal(set(node.getrawmempool()), {ecdsa0tx.hash, schnorr1tx.hash}) tip = self.build_block(tip, [schnorr1tx]) node.p2ps[0].send_blocks_and_test([tip], node) - self.log.info( - "That legacy ECDSA multisig is still in mempool, let's mine it") + self.log.info("That legacy ECDSA multisig is still in mempool, let's mine it") assert_equal(node.getrawmempool(), [ecdsa0tx.hash]) tip = self.build_block(tip, [ecdsa0tx]) node.p2ps[0].send_blocks_and_test([tip], node) assert_equal(node.getrawmempool(), []) - self.log.info( - "Trying Schnorr in legacy multisig is invalid and banworthy.") - self.check_for_ban_on_rejected_tx( - schnorr0tx, SCHNORR_LEGACY_MULTISIG_ERROR) + self.log.info("Trying Schnorr in legacy multisig is invalid and banworthy.") 
+ self.check_for_ban_on_rejected_tx(schnorr0tx, SCHNORR_LEGACY_MULTISIG_ERROR) self.check_for_ban_on_rejected_block( - self.build_block(tip, [schnorr0tx]), BADINPUTS_ERROR) + self.build_block(tip, [schnorr0tx]), BADINPUTS_ERROR + ) -if __name__ == '__main__': +if __name__ == "__main__": SchnorrMultisigTest().main() diff --git a/test/functional/abc-segwit-recovery.py b/test/functional/abc-segwit-recovery.py --- a/test/functional/abc-segwit-recovery.py +++ b/test/functional/abc-segwit-recovery.py @@ -33,21 +33,24 @@ TEST_TIME = int(time.time()) # Error due to non clean stack -CLEANSTACK_ERROR = 'non-mandatory-script-verify-flag (Stack size must be exactly one after execution)' +CLEANSTACK_ERROR = ( + "non-mandatory-script-verify-flag (Stack size must be exactly one after execution)" +) RPC_CLEANSTACK_ERROR = CLEANSTACK_ERROR -EVAL_FALSE_ERROR = 'non-mandatory-script-verify-flag (Script evaluated without error but finished with a false/empty top stack elem' +EVAL_FALSE_ERROR = ( + "non-mandatory-script-verify-flag (Script evaluated without error but finished with" + " a false/empty top stack elem" +) RPC_EVAL_FALSE_ERROR = f"{EVAL_FALSE_ERROR}ent)" class PreviousSpendableOutput(object): - def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n class SegwitRecoveryTest(BitcoinTestFramework): - def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True @@ -67,9 +70,10 @@ # that segwit spending txn are not resulting in bans, node_nonstd # doesn't get banned when forwarding this kind of transactions to # node_std. 
- self.extra_args = [['-whitelist=noban@127.0.0.1', - "-acceptnonstdtxn"], - ["-acceptnonstdtxn=0"]] + self.extra_args = [ + ["-whitelist=noban@127.0.0.1", "-acceptnonstdtxn"], + ["-acceptnonstdtxn=0"], + ] def make_block(self, base_block: Optional[CBlock]) -> CBlock: """ @@ -105,8 +109,7 @@ peer_std = node_std.add_p2p_connection(P2PDataStore()) # adds transactions to the block and updates state - def update_block(block: CBlock, - new_transactions: Sequence[CTransaction]): + def update_block(block: CBlock, new_transactions: Sequence[CTransaction]): block.vtx.extend(new_transactions) make_conform_to_ctor(block) block.hashMerkleRoot = block.calc_merkle_root() @@ -120,14 +123,16 @@ # Spending from a P2SH-P2WPKH coin, # txhash:a45698363249312f8d3d93676aa714be59b0bd758e62fa054fb1ea6218480691 redeem_script0 = bytearray.fromhex( - '0014fcf9969ce1c98a135ed293719721fb69f0b686cb') + "0014fcf9969ce1c98a135ed293719721fb69f0b686cb" + ) # Spending from a P2SH-P2WSH coin, # txhash:6b536caf727ccd02c395a1d00b752098ec96e8ec46c96bee8582be6b5060fa2f redeem_script1 = bytearray.fromhex( - '0020fc8b08ed636cb23afcb425ff260b3abd03380a2333b54cfa5d51ac52d803baf4') + "0020fc8b08ed636cb23afcb425ff260b3abd03380a2333b54cfa5d51ac52d803baf4" + ) else: - redeem_script0 = bytearray.fromhex('51020000') - redeem_script1 = bytearray.fromhex('53020080') + redeem_script0 = bytearray.fromhex("51020000") + redeem_script1 = bytearray.fromhex("53020080") redeem_scripts = [redeem_script0, redeem_script1] # Fund transaction to segwit addresses @@ -136,7 +141,10 @@ amount = (50 * COIN - 1000) // len(redeem_scripts) for redeem_script in redeem_scripts: txfund.vout.append( - CTxOut(amount, CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL]))) + CTxOut( + amount, CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL]) + ) + ) txfund.rehash() # Segwit spending transaction @@ -148,9 +156,14 @@ txspend = CTransaction() for i in range(len(redeem_scripts)): txspend.vin.append( - CTxIn(COutPoint(txfund.sha256, i), 
CScript([redeem_scripts[i]]))) - txspend.vout = [CTxOut(50 * COIN - 2000, - CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))] + CTxIn(COutPoint(txfund.sha256, i), CScript([redeem_scripts[i]])) + ) + txspend.vout = [ + CTxOut( + 50 * COIN - 2000, + CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]), + ) + ] txspend.rehash() return txfund, txspend @@ -171,13 +184,11 @@ # collect spendable outputs now to avoid cluttering the code later on out = [] for _ in range(100): - out.append( - PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)) + out.append(PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)) # Create segwit funding and spending transactions txfund, txspend = create_segwit_fund_and_spend_tx(out[0]) - txfund_case0, txspend_case0 = create_segwit_fund_and_spend_tx( - out[1], True) + txfund_case0, txspend_case0 = create_segwit_fund_and_spend_tx(out[1], True) # Mine txfund, as it can't go into node_std mempool because it's # nonstandard. @@ -190,25 +201,36 @@ # Check that upgraded nodes checking for standardness are not banning # nodes sending segwit spending txns. 
- peer_nonstd.send_txs_and_test([txspend], node_nonstd, success=False, - reject_reason=CLEANSTACK_ERROR) - peer_nonstd.send_txs_and_test([txspend_case0], node_nonstd, success=False, - reject_reason=EVAL_FALSE_ERROR) - peer_std.send_txs_and_test([txspend], node_std, success=False, - reject_reason=CLEANSTACK_ERROR) - peer_std.send_txs_and_test([txspend_case0], node_std, success=False, - reject_reason=EVAL_FALSE_ERROR) + peer_nonstd.send_txs_and_test( + [txspend], node_nonstd, success=False, reject_reason=CLEANSTACK_ERROR + ) + peer_nonstd.send_txs_and_test( + [txspend_case0], node_nonstd, success=False, reject_reason=EVAL_FALSE_ERROR + ) + peer_std.send_txs_and_test( + [txspend], node_std, success=False, reject_reason=CLEANSTACK_ERROR + ) + peer_std.send_txs_and_test( + [txspend_case0], node_std, success=False, reject_reason=EVAL_FALSE_ERROR + ) # Segwit recovery txns are never accepted into the mempool, # as they are included in standard flags. - assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR, - node_nonstd.sendrawtransaction, ToHex(txspend)) - assert_raises_rpc_error(-26, RPC_EVAL_FALSE_ERROR, - node_nonstd.sendrawtransaction, ToHex(txspend_case0)) - assert_raises_rpc_error(-26, RPC_CLEANSTACK_ERROR, - node_std.sendrawtransaction, ToHex(txspend)) - assert_raises_rpc_error(-26, RPC_EVAL_FALSE_ERROR, - node_std.sendrawtransaction, ToHex(txspend_case0)) + assert_raises_rpc_error( + -26, RPC_CLEANSTACK_ERROR, node_nonstd.sendrawtransaction, ToHex(txspend) + ) + assert_raises_rpc_error( + -26, + RPC_EVAL_FALSE_ERROR, + node_nonstd.sendrawtransaction, + ToHex(txspend_case0), + ) + assert_raises_rpc_error( + -26, RPC_CLEANSTACK_ERROR, node_std.sendrawtransaction, ToHex(txspend) + ) + assert_raises_rpc_error( + -26, RPC_EVAL_FALSE_ERROR, node_std.sendrawtransaction, ToHex(txspend_case0) + ) # Blocks containing segwit spending txns are accepted in both nodes. 
block = self.make_block(block) @@ -217,5 +239,5 @@ self.sync_blocks() -if __name__ == '__main__': +if __name__ == "__main__": SegwitRecoveryTest().main() diff --git a/test/functional/abc-sync-chain.py b/test/functional/abc-sync-chain.py --- a/test/functional/abc-sync-chain.py +++ b/test/functional/abc-sync-chain.py @@ -33,8 +33,7 @@ def set_test_params(self): self.num_nodes = 1 # Setting minimumchainwork makes sure we test IBD as well as post-IBD - self.extra_args = [ - [f"-minimumchainwork={202 + 2 * NUM_IBD_BLOCKS:#x}"]] + self.extra_args = [[f"-minimumchainwork={202 + 2 * NUM_IBD_BLOCKS:#x}"]] def run_test(self): node0 = self.nodes[0] @@ -42,7 +41,7 @@ tip = int(node0.getbestblockhash(), 16) height = node0.getblockcount() + 1 - time = node0.getblock(node0.getbestblockhash())['time'] + 1 + time = node0.getblock(node0.getbestblockhash())["time"] + 1 blocks = [] for _ in range(NUM_IBD_BLOCKS * 2): @@ -64,8 +63,9 @@ # The node should eventually, completely sync without getting stuck def node_synced(): return node0.getbestblockhash() == blocks[-1].hash + self.wait_until(node_synced) -if __name__ == '__main__': +if __name__ == "__main__": SyncChainTest().main() diff --git a/test/functional/abc-transaction-ordering.py b/test/functional/abc-transaction-ordering.py --- a/test/functional/abc-transaction-ordering.py +++ b/test/functional/abc-transaction-ordering.py @@ -24,21 +24,19 @@ class PreviousSpendableOutput: - def __init__(self, tx=CTransaction(), n=-1): self.tx = tx self.n = n # the output we're spending class TransactionOrderingTest(BitcoinTestFramework): - def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True self.block_heights = {} self.tip = None self.blocks = {} - self.extra_args = [['-whitelist=noban@127.0.0.1']] + self.extra_args = [["-whitelist=noban@127.0.0.1"]] def add_transactions_to_block(self, block, tx_list): [tx.rehash() for tx in tx_list] @@ -179,8 +177,12 @@ peer.send_blocks_and_test([block(5556)], node) # Block with regular 
ordering are now rejected. - peer.send_blocks_and_test([block( - 5557, out[17], tx_count=16)], node, success=False, reject_reason='tx-ordering') + peer.send_blocks_and_test( + [block(5557, out[17], tx_count=16)], + node, + success=False, + reject_reason="tx-ordering", + ) # Rewind bad block. self.set_tip(5556) @@ -199,11 +201,13 @@ # Generate a block with a duplicated transaction. double_tx_block = ordered_block(4447, out[19]) assert_equal(len(double_tx_block.vtx), 16) - double_tx_block.vtx = double_tx_block.vtx[:8] + \ - [double_tx_block.vtx[8]] + double_tx_block.vtx[8:] + double_tx_block.vtx = ( + double_tx_block.vtx[:8] + [double_tx_block.vtx[8]] + double_tx_block.vtx[8:] + ) update_block(4447) peer.send_blocks_and_test( - [self.tip], node, success=False, reject_reason='bad-txns-duplicate') + [self.tip], node, success=False, reject_reason="bad-txns-duplicate" + ) # Rewind bad block. self.set_tip(4446) @@ -215,12 +219,14 @@ replay_tx_block = ordered_block(4449, out[21]) assert_equal(len(replay_tx_block.vtx), 16) replay_tx_block.vtx.append(proper_block.vtx[5]) - replay_tx_block.vtx = [replay_tx_block.vtx[0]] + \ - sorted(replay_tx_block.vtx[1:], key=lambda tx: tx.get_id()) + replay_tx_block.vtx = [replay_tx_block.vtx[0]] + sorted( + replay_tx_block.vtx[1:], key=lambda tx: tx.get_id() + ) update_block(4449) peer.send_blocks_and_test( - [self.tip], node, success=False, reject_reason='bad-txns-BIP30') + [self.tip], node, success=False, reject_reason="bad-txns-BIP30" + ) -if __name__ == '__main__': +if __name__ == "__main__": TransactionOrderingTest().main()