diff --git a/src/txmempool.h b/src/txmempool.h --- a/src/txmempool.h +++ b/src/txmempool.h @@ -844,6 +844,9 @@ * Instead, store the disconnected transactions (in order!) as we go, remove any * that are included in blocks in the new chain, and then process the remaining * still-unconfirmed transactions at the end. + * + * It also enables efficient reprocessing of current mempool entries, especially + * useful on reorgs that result in in-mempool transactions becoming invalid */ // multi_index tag names struct txid_index {}; @@ -894,6 +897,11 @@ return queuedTx; } + // Import current mempool entries in topological order. Caller must clear + // the mempool and call updateMempoolForReorg to reprocess these + // transactions + void importMempool(); + // Add entries for a block while reconstructing the topological ordering so // they can be added back to the mempool simply. void addForBlock(const std::vector<CTransactionRef> &vtx); diff --git a/src/txmempool.cpp b/src/txmempool.cpp --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -1321,6 +1321,35 @@ } } +void DisconnectedBlockTransactions::importMempool() { + // addForBlock's algorithm sorts a vector of transactions back into + // topological order. We use it in a separate object to create a valid + // ordering of all mempool transactions, which we then splice in front of + // the current queuedTx. This results in a valid sequence of transactions to + // be reprocessed in updateMempoolForReorg + DisconnectedBlockTransactions ordered_mempool; + std::vector<CTransactionRef> vtx; + vtx.reserve(g_mempool.mapTx.size()); + for (const CTxMemPoolEntry &e : g_mempool.mapTx) { + vtx.push_back(e.GetSharedTx()); + } + ordered_mempool.addForBlock(vtx); + cachedInnerUsage += ordered_mempool.cachedInnerUsage; + queuedTx.get<insertion_order>().splice( + queuedTx.get<insertion_order>().begin(), + ordered_mempool.queuedTx.get<insertion_order>()); + + // We limit memory usage because we can't know if this block is the last one + // being disconnected. 
Right now, the dynamic memory usage of these mempool + // transactions is being double accounted for, both here and in g_mempool. + // Once the caller clears g_mempool, this estimate becomes coherent again + while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) { + // Drop the earliest entry which, by definition, has no children + auto it = queuedTx.get<insertion_order>().begin(); + removeEntry(it); + } +} + void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config, bool fAddToMempool) { AssertLockHeld(cs_main); diff --git a/src/validation.cpp b/src/validation.cpp --- a/src/validation.cpp +++ b/src/validation.cpp @@ -310,6 +310,11 @@ return IsMagneticAnomalyEnabled(config, chainActive.Tip()); } +static bool IsGreatWallEnabledForCurrentBlock(const Config &config) { + AssertLockHeld(cs_main); + return IsGreatWallEnabled(config, chainActive.Tip()); +} + // Command-line argument "-replayprotectionactivationtime=<n>" will // cause the node to switch to replay protected SigHash ForkID value when the // median timestamp of the previous 11 blocks is greater than or equal to @@ -635,6 +640,12 @@ extraFlags |= SCRIPT_ENABLE_CHECKDATASIG; } + if (IsGreatWallEnabledForCurrentBlock(config)) { + if (!fRequireStandard) { + extraFlags |= SCRIPT_ALLOW_SEGWIT_RECOVERY; + } + } + // Check inputs based on the set of flags we activate. uint32_t scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; if (!config.GetChainParams().RequireStandard()) { @@ -1187,13 +1198,20 @@ // This differs from MANDATORY_SCRIPT_VERIFY_FLAGS as it contains // additional upgrade flags (see AcceptToMemoryPoolWorker variable // extraFlags). + // Even though CLEANSTACK is not mandatory as of yet, it might + // become mandatory in a future revision. 
Since ALLOW_SEGWIT_RECOVERY + // allows a more permissive ruleset, we need to manually include + // it in here to recheck this input under the most permissible set + // of consensus rules uint32_t mandatoryFlags = - flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS; + (flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS) | + SCRIPT_ALLOW_SEGWIT_RECOVERY; if (flags != mandatoryFlags) { // Check whether the failure was caused by a non-mandatory // script verification check. If so, don't trigger DoS // protection to avoid splitting the network on the basis of // relay policy disagreements. + // Re-run validation under the expanded mandatory flag set. CScriptCheck check2(scriptPubKey, amount, tx, i, mandatoryFlags, sigCacheStore, txdata); if (check2()) { @@ -1571,6 +1589,12 @@ flags |= SCRIPT_VERIFY_CLEANSTACK; } + // If the Great Wall fork is enabled, we start accepting transactions + // recovering coins sent to segwit addresses + if (IsGreatWallEnabled(config, pChainTip)) { + flags |= SCRIPT_ALLOW_SEGWIT_RECOVERY; + } + // We make sure this node will have replay protection during the next hard // fork. if (IsReplayProtectionEnabled(config, pChainTip)) { @@ -2165,6 +2189,10 @@ * If disconnectpool is nullptr, then no disconnected transactions are added to * disconnectpool (note that the caller is responsible for mempool consistency * in any case). + * + * In case of a reorg crossing a fork boundary, i.e. a change in applicable + * evaluation flags, it queues all mempool transactions in disconnectpool for + * future reprocessing in updateMempoolForReorg. The mempool is thus emptied. */ static bool DisconnectTip(const Config &config, CValidationState &state, DisconnectedBlockTransactions *disconnectpool) { @@ -2207,7 +2235,7 @@ // add the transaction of the block we just disconnected back. 
if (IsReplayProtectionEnabled(config, pindexDelete) && !IsReplayProtectionEnabled(config, pindexDelete->pprev)) { - LogPrint(BCLog::MEMPOOL, "Clearing mempool for reorg"); + LogPrint(BCLog::MEMPOOL, "Clearing mempool for reorg\n"); g_mempool.clear(); // While not strictly necessary, clearing the disconnect pool is also @@ -2218,6 +2246,19 @@ } } + // If this block is deactivating a fork, we enqueue all mempool transactions + // in front of disconnectpool for future reprocessing + if (GetBlockScriptFlags(config, pindexDelete) != + GetBlockScriptFlags(config, pindexDelete->pprev)) { + LogPrint(BCLog::MEMPOOL, + "%s mempool for a reorg crossing a fork boundary\n", + disconnectpool ? "Reprocessing" : "Clearing"); + if (disconnectpool) { + disconnectpool->importMempool(); + } + g_mempool.clear(); + } + if (disconnectpool) { disconnectpool->addForBlock(block.vtx); } diff --git a/test/functional/abc-segwit-recovery-activation.py b/test/functional/abc-segwit-recovery-activation.py new file mode 100755 --- /dev/null +++ b/test/functional/abc-segwit-recovery-activation.py @@ -0,0 +1,313 @@ +#!/usr/bin/env python3 +# Copyright (c) 2019 The Bitcoin developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+""" +This test checks activation of the SCRIPT_ALLOW_SEGWIT_RECOVERY flag +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import satoshi_round, assert_equal, wait_until, connect_nodes, sync_blocks +from test_framework.comptool import TestManager, TestInstance, RejectResult +from test_framework.blocktools import * +from test_framework.script import * + +# far into the future +GREAT_WALL_START_TIME = 2000000000 + + +class SegwitRecoveryActivationTest(BitcoinTestFramework): + + def set_test_params(self): + self.num_nodes = 2 + self.setup_clean_chain = True + # To test for bans, we start a 2nd node + # Node 1 doesn't accept nonstdtxns nor has any whitelisted nodes. + self.extra_args = [["-whitelist=127.0.0.1", + "-greatwallactivationtime=%d" % GREAT_WALL_START_TIME, + "-acceptnonstdtxn", + "-replayprotectionactivationtime=%d" % (2 * GREAT_WALL_START_TIME)], + ["-greatwallactivationtime=%d" % GREAT_WALL_START_TIME, + "-acceptnonstdtxn=0", + "-replayprotectionactivationtime=%d" % (2 * GREAT_WALL_START_TIME)]] + + def setup_network(self): + # Network topology: mininode -> node0 -> node1 + self.setup_nodes() + connect_nodes(self.nodes[0], self.nodes[1]) + self.sync_all() + + def run_test(self): + self.test = TestManager(self, self.options.tmpdir) + # mininode only connects to node0. 
node1 is connected to node0 + self.test.add_all_connections([self.nodes[0]]) + network_thread_start() + self.test.run() + + # Mines and returns 2 funding transactions: + # 1) A: spends to 1..N P2SH addresses, given by their redeem_scripts + # 2) B: spends to a standard P2SH address (redeem script: OP_TRUE), to test + # chains of standard transactions + def mine_funding_txns(self, redeem_scripts): + node = self.nodes[0] + utxos = node.listunspent() + assert(len(utxos) > 1) + # Creates txA, spending to P2SH addresses + utxo = utxos[0] + txA = CTransaction() + value = int(satoshi_round(utxo["amount"]) * COIN) // len( + redeem_scripts) + txA.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]))] + for redeem_script in redeem_scripts: + txA.vout.append( + CTxOut(value, CScript([OP_HASH160, hash160(redeem_script), OP_EQUAL]))) + txA.vout[0].nValue -= node.calculate_fee(txA) + txA_signed = node.signrawtransaction(ToHex(txA))["hex"] + txA = FromHex(CTransaction(), txA_signed) + txA.rehash() + # Creates txB, spending to P2SH(OP_TRUE) + utxo = utxos[1] + txB = CTransaction() + value = int(satoshi_round(utxo["amount"]) * COIN) + txB.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]))] + txB.vout = [ + CTxOut(value, CScript([OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL]))] + txB.vout[0].nValue -= node.calculate_fee(txB) + txB_signed = node.signrawtransaction(ToHex(txB))["hex"] + txB = FromHex(CTransaction(), txB_signed) + txB.rehash() + # Mines a block with both transactions + node.sendrawtransaction(txA_signed) + node.sendrawtransaction(txB_signed) + block_hash = node.generate(1)[0] + block_txns = node.getblock(block_hash)['tx'] + assert(txA.hash in block_txns and txB.hash in block_txns) + return txA, txB + + def get_tests(self): + node = self.nodes[0] + node1 = self.nodes[1] + + def next_block(block_time, spend_txns=None): + # get block height + blockchaininfo = node.getblockchaininfo() + height = int(blockchaininfo['blocks']) + + # create the block + 
coinbase = create_coinbase(height) + coinbase.rehash() + block = create_block( + int(node.getbestblockhash(), 16), coinbase, block_time) + if spend_txns: + # Add these txns in this block and recompute the merkle root + # We ignore the transaction fees + block.vtx.extend( + sorted(spend_txns, key=lambda tx: tx.get_id())) + block.hashMerkleRoot = block.calc_merkle_root() + + # Do PoW, which is cheap on regtest + block.solve() + return block + + # returns a test case that asserts that the current tip was accepted + def accepted(tip): + return TestInstance([[tip, True]]) + + # returns a test case that asserts that the current tip was rejected + def rejected(tip, reject=None): + if reject is None: + return TestInstance([[tip, False]]) + else: + return TestInstance([[tip, reject]]) + + # returns a test case that asserts that the transaction was accepted + def tx_accepted(tx): + return TestInstance([[tx, True]]) + + # returns a test case that asserts that the transaction was rejected + def tx_rejected(tx, reject): + return TestInstance([[tx, reject]]) + + # First, we generate some coins to spend. + node.generate(125) + + # To make sure we'll be able to recover coins sent to segwit addresses, + # we test using historical recoveries from btc.com: + # Spending from a P2SH-P2WPKH coin, + # txhash:a45698363249312f8d3d93676aa714be59b0bd758e62fa054fb1ea6218480691 + redeem_script0 = bytearray.fromhex( + '0014fcf9969ce1c98a135ed293719721fb69f0b686cb') + # Spending from a P2SH-P2WSH coin, + # txhash:6b536caf727ccd02c395a1d00b752098ec96e8ec46c96bee8582be6b5060fa2f + redeem_script1 = bytearray.fromhex( + '0020fc8b08ed636cb23afcb425ff260b3abd03380a2333b54cfa5d51ac52d803baf4') + redeem_scripts = [redeem_script0, redeem_script1] + + # Mine a block that creates utxos in segwit addresses and in a standard + # P2SH address + tx_fund_segwit, tx_fund_std = self.mine_funding_txns(redeem_scripts) + + # Returns a transaction that spends from segwit addresses to segwit + # addresses again. 
Used to create a chain of segwit spending txns + def create_txn_segwit_to_segwit_address(txin, redeem_scripts): + tx = CTransaction() + for i in range(len(redeem_scripts)): + tx.vin.append( + CTxIn(COutPoint(txin.sha256, i), CScript([redeem_scripts[i]]))) + tx.vout.append( + CTxOut(txin.vout[i].nValue, CScript([OP_HASH160, hash160(redeem_scripts[i]), OP_EQUAL]))) + tx.vout[0].nValue -= node.calculate_fee(tx) + tx.rehash() + return tx + + # Returns a transaction that simply spends a P2SH(OP_TRUE) utxo into another + # P2SH(OP_TRUE) utxo. Used to create a chain of standard txns + def create_txn_1_to_1(txin): + tx = CTransaction() + tx.vin = [ + CTxIn(COutPoint(txin.sha256, 0), CScript([CScript([OP_TRUE])]))] + tx.vout = [CTxOut( + txin.vout[0].nValue, CScript( + [OP_HASH160, hash160(CScript([OP_TRUE])), OP_EQUAL])), + CTxOut(0, CScript([OP_RETURN, random.getrandbits(800)]))] + tx.vout[0].nValue -= node.calculate_fee(tx) + tx.rehash() + return tx + + # Push MTP forward just before activation. 
+ node.setmocktime(GREAT_WALL_START_TIME) + node1.setmocktime(GREAT_WALL_START_TIME) + + for i in range(6): + b = next_block(GREAT_WALL_START_TIME + i - 1) + yield accepted(b) + + assert_equal( + node.getblockheader(node.getbestblockhash())['mediantime'], + GREAT_WALL_START_TIME - 1) + + # Check that segwit spending tx is not acceptable into the mempool + self.log.info("Try to push a segwit spending txn into the mempool " + "before activation") + tx_spend_sw = create_txn_segwit_to_segwit_address( + tx_fund_segwit, redeem_scripts) + yield tx_rejected(tx_spend_sw, RejectResult(64, b'non-mandatory-script-verify-flag (Script did not clean its stack)')) + + # Check that segwit spending tx is not acceptable in a block + self.log.info("Try to broadcast a block containing segwit spending txn " + "before activation") + b = next_block(GREAT_WALL_START_TIME + 6, [tx_spend_sw]) + yield rejected(b, RejectResult(16, b'blk-bad-inputs')) + + # Activates the fork + self.log.info("Activate Great Wall") + fork_block = next_block(GREAT_WALL_START_TIME + 6) + yield accepted(fork_block) + + assert_equal( + node.getblockheader(node.getbestblockhash())['mediantime'], + GREAT_WALL_START_TIME) + + # Deactivates the fork in node1. + # Guarantee they are connected and in sync + assert(len(node1.getpeerinfo()) == 1) + sync_blocks(self.nodes) + node1.invalidateblock(fork_block.hash) + + # Check that segwit spending tx is acceptable into the mempool since + # standardness checks were disabled + self.log.info("Try to push a segwit spending txn into the mempool " + "after activation") + yield tx_accepted(tx_spend_sw) + + # We'll check if a node that hasn't activated the fork will ban a node that has. + # We broadcast a standard tx. Node0 will accept both the previously sent segwit + # spending txn and this standard txn and will send them to node1. 
Node1 hasn't + # activated, so it must not accept the segwit spending tx + self.log.info("Check if a node that hasn't activated the fork is banning " + "a node that has") + tx_spend_std = create_txn_1_to_1(tx_fund_std) + node.sendrawtransaction(ToHex(tx_spend_std)) + assert(tx_spend_std.hash in node.getrawmempool()) + + # Tests that node1 has processed both transactions by checking the + # presence of the 2nd txn (tx_spend_std) in its mempool + wait_until( + lambda: tx_spend_std.hash in node1.getrawmempool(), timeout=10) + mempool = node1.getrawmempool() + assert( + tx_spend_sw.hash not in mempool and tx_spend_std.hash in mempool) + # Check that node1 is still connected to node0 + assert(len(node1.getpeerinfo()) == 1) + # Now we let node1 sync with node0 again + node1.reconsiderblock(fork_block.hash) + sync_blocks(self.nodes) + + # Check that segwit spending txns are acceptable in new blocks. We mine + # a few blocks, creating two chains of transactions: segwit spending + # txns and standard txns. Each block has one transaction from each chain. 
+ # We keep track of both chains to later check the mempool eviction logic + # on a reorg + self.log.info("Try to broadcast blocks containing segwit spending tnx " + "after activation") + std_tx_ids = [] + segwit_spending_tx_ids = [] + for i in range(5): + # We already have tx_spend_sw and tx_spend_std for the first + # iteration + if i > 0: + tx_spend_sw = create_txn_segwit_to_segwit_address( + tx_spend_sw, redeem_scripts) + tx_spend_std = create_txn_1_to_1(tx_spend_std) + b = next_block( + GREAT_WALL_START_TIME + 7, [tx_spend_sw, tx_spend_std]) + yield accepted(b) + segwit_spending_tx_ids.append(tx_spend_sw.hash) + std_tx_ids.append(tx_spend_std.hash) + + # Add a chain of unconfirmed txns in the mempool for the reorg test + for _ in range(5): + tx_spend_sw = create_txn_segwit_to_segwit_address( + tx_spend_sw, redeem_scripts) + tx_spend_std = create_txn_1_to_1(tx_spend_std) + node.sendrawtransaction(ToHex(tx_spend_sw)) + node.sendrawtransaction(ToHex(tx_spend_std)) + mempool = node.getrawmempool() + assert( + tx_spend_sw.hash in mempool and tx_spend_std.hash in mempool) + segwit_spending_tx_ids.append(tx_spend_sw.hash) + std_tx_ids.append(tx_spend_std.hash) + + # Check if a node that is checking for standardness is banning a node that isn't. 
+ # Tests that node1 has processed the last tx_spend_sw by checking the + # presence of tx_spend_std (sent later) in its mempool + self.log.info("Check if a node that is checking for standardness is banning " + "a node that isn't") + wait_until( + lambda: tx_spend_std.hash in node1.getrawmempool(), timeout=10) + mempool = node1.getrawmempool() + assert( + tx_spend_sw.hash not in mempool and tx_spend_std.hash in mempool) + # Check that node1 is still connected to node0 + assert(len(node1.getpeerinfo()) == 1) + + # Cause a reorg to right after the fork was activated and check the + # mempool + self.log.info("Cause a reorg that doesn't deactivate the fork and " + "check the mempool") + node.invalidateblock(node.getblock(fork_block.hash)['nextblockhash']) + # Check that both segwit spending and standard txns have returned to the + # mempool + assert(set(node.getrawmempool()) + == set(std_tx_ids + segwit_spending_tx_ids)) + + # Cause a reorg to check the mempool in case the fork was deactivated + self.log.info("Cause a reorg that deactivates the fork and check segwit " + "spending txns were evicted from the mempool") + node.invalidateblock(fork_block.hash) + + # Check that segwit spending txns have been evicted from the mempool + assert(set(node.getrawmempool()) == set(std_tx_ids)) + +if __name__ == '__main__': + SegwitRecoveryActivationTest().main()