diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index e31c923a6..a0d9daebb 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1,3853 +1,3855 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include "net_processing.h"
 
 #include "addrman.h"
 #include "arith_uint256.h"
 #include "blockencodings.h"
 #include "chainparams.h"
 #include "config.h"
 #include "consensus/validation.h"
 #include "hash.h"
 #include "init.h"
 #include "merkleblock.h"
 #include "net.h"
 #include "netbase.h"
 #include "netmessagemaker.h"
 #include "policy/fees.h"
 #include "policy/policy.h"
 #include "primitives/block.h"
 #include "primitives/transaction.h"
 #include "random.h"
 #include "tinyformat.h"
 #include "txmempool.h"
 #include "ui_interface.h"
 #include "util.h"
 #include "utilmoneystr.h"
 #include "utilstrencodings.h"
 #include "validation.h"
 #include "validationinterface.h"
 
 #include <boost/range/adaptor/reversed.hpp>
 #include <boost/thread.hpp>
 
 #if defined(NDEBUG)
 #error "Bitcoin cannot be compiled without assertions."
 #endif
 
 // Used only to inform the wallet of when we last received a block.
 std::atomic<int64_t> nTimeBestReceived(0);
 
 struct IteratorComparator {
     template <typename I> bool operator()(const I &a, const I &b) {
         return &(*a) < &(*b);
     }
 };
 
 struct COrphanTx {
     // When modifying, adapt the copy of this definition in tests/DoS_tests.
     CTransactionRef tx;
     NodeId fromPeer;
     int64_t nTimeExpire;
 };
 std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
 std::map<COutPoint,
          std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>>
     mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
 void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 static size_t vExtraTxnForCompactIt = 0;
 static std::vector<std::pair<uint256, CTransactionRef>>
     vExtraTxnForCompact GUARDED_BY(cs_main);
 
 // SHA256("main address relay")[0:8]
 static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
 
 // Internal stuff
 namespace {
 /** Number of nodes with fSyncStarted. */
 int nSyncStarted = 0;
 
 /**
  * Sources of received blocks, saved to be able to send them reject messages or
  * ban them when processing happens afterwards. Protected by cs_main.
  * Set mapBlockSource[hash].second to false if the node should not be punished
  * if the block is invalid.
  */
 std::map<uint256, std::pair<NodeId, bool>> mapBlockSource;
 
 /**
  * Filter for transactions that were recently rejected by AcceptToMemoryPool.
  * These are not rerequested until the chain tip changes, at which point the
  * entire filter is reset. Protected by cs_main.
  *
  * Without this filter we'd be re-requesting txs from each of our peers,
  * increasing bandwidth consumption considerably. For instance, with 100 peers,
  * half of which relay a tx we don't accept, that might be a 50x bandwidth
  * increase. A flooding attacker attempting to roll over the filter using
  * minimum-sized, 60-byte transactions might manage to send 1000/sec if we have
  * fast peers, so we pick 120,000 to give our peers a two-minute window to send
  * invs to us.
  *
  * Decreasing the false positive rate is fairly cheap, so we pick one in a
  * million to make it highly unlikely for users to have issues with this filter.
  *
  * Memory used: 1.3 MB
  */
 std::unique_ptr<CRollingBloomFilter> recentRejects;
 uint256 hashRecentRejectsChainTip;
 
 /** Blocks that are in flight, and that are in the queue to be downloaded.
  * Protected by cs_main. */
 struct QueuedBlock {
     uint256 hash;
     //! Optional.
     const CBlockIndex *pindex;
     //! Whether this block has validated headers at the time of request.
     bool fValidatedHeaders;
     //! Optional, used for CMPCTBLOCK downloads
     std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
 };
 std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>>
     mapBlocksInFlight;
 
 /** Stack of nodes which we have set to announce using compact blocks */
 std::list<NodeId> lNodesAnnouncingHeaderAndIDs;
 
 /** Number of preferable block download peers. */
 int nPreferredDownload = 0;
 
 /** Number of peers from which we're downloading blocks. */
 int nPeersWithValidatedDownloads = 0;
 
 /** Relay map, protected by cs_main. */
 typedef std::map<uint256, CTransactionRef> MapRelay;
 MapRelay mapRelay;
 /** Expiration-time ordered list of (expire time, relay map entry) pairs,
  * protected by cs_main. */
 std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
 } // namespace
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // Registration of network node signals.
 //
 
 namespace {
 
 struct CBlockReject {
     uint8_t chRejectCode;
     std::string strRejectReason;
     uint256 hashBlock;
 };
 
 /**
  * Maintain validation-specific state about nodes, protected by cs_main, instead
  * of by CNode's own locks. This simplifies asynchronous operation, where
  * processing of incoming data is done after the ProcessMessage call returns,
  * and we're no longer holding the node's locks.
  */
 struct CNodeState {
     //! The peer's address
     const CService address;
     //! Whether we have a fully established connection.
     bool fCurrentlyConnected;
     //! Accumulated misbehaviour score for this peer.
     int nMisbehavior;
     //! Whether this peer should be disconnected and banned (unless
     //! whitelisted).
     bool fShouldBan;
     //! String name of this peer (debugging/logging purposes).
     const std::string name;
     //! List of asynchronously-determined block rejections to notify this peer
     //! about.
     std::vector<CBlockReject> rejects;
     //! The best known block we know this peer has announced.
     const CBlockIndex *pindexBestKnownBlock;
     //! The hash of the last unknown block this peer has announced.
     uint256 hashLastUnknownBlock;
     //! The last full block we both have.
     const CBlockIndex *pindexLastCommonBlock;
     //! The best header we have sent our peer.
     const CBlockIndex *pindexBestHeaderSent;
     //! Length of the current streak of unconnecting headers announcements
     int nUnconnectingHeaders;
     //! Whether we've started headers synchronization with this peer.
     bool fSyncStarted;
     //! Since when we're stalling block download progress (in microseconds), or
     //! 0.
     int64_t nStallingSince;
     std::list<QueuedBlock> vBlocksInFlight;
     //! When the first entry in vBlocksInFlight started downloading. Don't care
     //! when vBlocksInFlight is empty.
     int64_t nDownloadingSince;
     int nBlocksInFlight;
     int nBlocksInFlightValidHeaders;
     //! Whether we consider this a preferred download peer.
     bool fPreferredDownload;
     //! Whether this peer wants invs or headers (when possible) for block
     //! announcements.
     bool fPreferHeaders;
     //! Whether this peer wants invs or cmpctblocks (when possible) for block
     //! announcements.
     bool fPreferHeaderAndIDs;
     /**
      * Whether this peer will send us cmpctblocks if we request them.
      * This is not used to gate request logic, as we really only care about
      * fSupportsDesiredCmpctVersion, but is used as a flag to "lock in" the
      * version of compact blocks we send.
      */
     bool fProvidesHeaderAndIDs;
     /**
      * If we've announced NODE_WITNESS to this peer: whether the peer sends
      * witnesses in cmpctblocks/blocktxns, otherwise: whether this peer sends
      * non-witnesses in cmpctblocks/blocktxns.
      */
     bool fSupportsDesiredCmpctVersion;
 
     CNodeState(CAddress addrIn, std::string addrNameIn)
         : address(addrIn), name(addrNameIn) {
         fCurrentlyConnected = false;
         nMisbehavior = 0;
         fShouldBan = false;
         pindexBestKnownBlock = nullptr;
         hashLastUnknownBlock.SetNull();
         pindexLastCommonBlock = nullptr;
         pindexBestHeaderSent = nullptr;
         nUnconnectingHeaders = 0;
         fSyncStarted = false;
         nStallingSince = 0;
         nDownloadingSince = 0;
         nBlocksInFlight = 0;
         nBlocksInFlightValidHeaders = 0;
         fPreferredDownload = false;
         fPreferHeaders = false;
         fPreferHeaderAndIDs = false;
         fProvidesHeaderAndIDs = false;
         fSupportsDesiredCmpctVersion = false;
     }
 };
 
 /** Map maintaining per-node state. Requires cs_main. */
 std::map<NodeId, CNodeState> mapNodeState;
 
 // Requires cs_main.
 CNodeState *State(NodeId pnode) {
     std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
     if (it == mapNodeState.end()) {
         return nullptr;
     }
 
     return &it->second;
 }
 
 void UpdatePreferredDownload(CNode *node, CNodeState *state) {
     nPreferredDownload -= state->fPreferredDownload;
 
     // Whether this node should be marked as a preferred download node.
     state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) &&
                                 !node->fOneShot && !node->fClient;
 
     nPreferredDownload += state->fPreferredDownload;
 }
 
 void PushNodeVersion(const Config &config, CNode *pnode, CConnman &connman,
                      int64_t nTime) {
     ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
     uint64_t nonce = pnode->GetLocalNonce();
     int nNodeStartingHeight = pnode->GetMyStartingHeight();
     NodeId nodeid = pnode->GetId();
     CAddress addr = pnode->addr;
 
     CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr)
                             ? addr
                             : CAddress(CService(), addr.nServices));
     CAddress addrMe = CAddress(CService(), nLocalNodeServices);
 
     connman.PushMessage(pnode,
                         CNetMsgMaker(INIT_PROTO_VERSION)
                             .Make(NetMsgType::VERSION, PROTOCOL_VERSION,
                                   (uint64_t)nLocalNodeServices, nTime, addrYou,
                                   addrMe, nonce, userAgent(config),
                                   nNodeStartingHeight, ::fRelayTxes));
 
     if (fLogIPs) {
         LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, "
                              "us=%s, them=%s, peer=%d\n",
                  PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(),
                  addrYou.ToString(), nodeid);
     } else {
         LogPrint(
             BCLog::NET,
             "send version message: version %d, blocks=%d, us=%s, peer=%d\n",
             PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
     }
 }
 
 void InitializeNode(const Config &config, CNode *pnode, CConnman &connman) {
     CAddress addr = pnode->addr;
     std::string addrName = pnode->GetAddrName();
     NodeId nodeid = pnode->GetId();
     {
         LOCK(cs_main);
         mapNodeState.emplace_hint(
             mapNodeState.end(), std::piecewise_construct,
             std::forward_as_tuple(nodeid),
             std::forward_as_tuple(addr, std::move(addrName)));
     }
 
     if (!pnode->fInbound) {
         PushNodeVersion(config, pnode, connman, GetTime());
     }
 }
 
 void FinalizeNode(NodeId nodeid, bool &fUpdateConnectionTime) {
     fUpdateConnectionTime = false;
     LOCK(cs_main);
     CNodeState *state = State(nodeid);
 
     if (state->fSyncStarted) {
         nSyncStarted--;
     }
 
     if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
         fUpdateConnectionTime = true;
     }
 
     for (const QueuedBlock &entry : state->vBlocksInFlight) {
         mapBlocksInFlight.erase(entry.hash);
     }
     // Get rid of stale mapBlockSource entries for this peer as they may leak
     // if we don't clean them up (I saw on the order of ~100 stale entries on
     // a full resynch in my testing -- these entries stay forever).
     // Performance note: most of the time mapBlockSource has 0 or 1 entries.
     // During synch of blockchain it may end up with as many as 1000 entries,
     // which still only takes ~1ms to iterate through on even old hardware.
     // So this memleak cleanup is not expensive and worth doing since even
     // small leaks are bad. :)
     for (auto it = mapBlockSource.begin(); it != mapBlockSource.end(); /*NA*/) {
         if (it->second.first == nodeid) {
             mapBlockSource.erase(it++);
         } else {
             ++it;
         }
     }
 
     EraseOrphansFor(nodeid);
     nPreferredDownload -= state->fPreferredDownload;
     nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
     assert(nPeersWithValidatedDownloads >= 0);
 
     mapNodeState.erase(nodeid);
 
     if (mapNodeState.empty()) {
         // Do a consistency check after the last peer is removed.
         assert(mapBlocksInFlight.empty());
         assert(nPreferredDownload == 0);
         assert(nPeersWithValidatedDownloads == 0);
     }
 }
 
 // Requires cs_main.
 // Returns a bool indicating whether we requested this block.
 // Also used if a block was /not/ received and timed out or started with another
 // peer.
 bool MarkBlockAsReceived(const uint256 &hash) {
     std::map<uint256,
              std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
         itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end()) {
         CNodeState *state = State(itInFlight->second.first);
         state->nBlocksInFlightValidHeaders -=
             itInFlight->second.second->fValidatedHeaders;
         if (state->nBlocksInFlightValidHeaders == 0 &&
             itInFlight->second.second->fValidatedHeaders) {
             // Last validated block on the queue was received.
             nPeersWithValidatedDownloads--;
         }
         if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
             // First block on the queue was received, update the start download
             // time for the next one
             state->nDownloadingSince =
                 std::max(state->nDownloadingSince, GetTimeMicros());
         }
         state->vBlocksInFlight.erase(itInFlight->second.second);
         state->nBlocksInFlight--;
         state->nStallingSince = 0;
         mapBlocksInFlight.erase(itInFlight);
         return true;
     }
 
     return false;
 }
 
 // Requires cs_main.
 // Returns false, still setting pit, if the block was already in flight from the
 // same peer. pit will only be valid as long as the same cs_main lock is being
 // held.
 static bool
 MarkBlockAsInFlight(const Config &config, NodeId nodeid, const uint256 &hash,
                     const Consensus::Params &consensusParams,
                     const CBlockIndex *pindex = nullptr,
                     std::list<QueuedBlock>::iterator **pit = nullptr) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Short-circuit most stuff in case it's from the same node.
     std::map<uint256,
              std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
         itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end() &&
         itInFlight->second.first == nodeid) {
         *pit = &itInFlight->second.second;
         return false;
     }
 
     // Make sure it's not listed somewhere already.
     MarkBlockAsReceived(hash);
 
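     // A PartiallyDownloadedBlock is only allocated when the caller passed
     // pit, i.e. for CMPCTBLOCK downloads that may be reconstructed from
     // mempool transactions; otherwise the slot stays null.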
     std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
         state->vBlocksInFlight.end(),
         {hash, pindex, pindex != nullptr,
          std::unique_ptr<PartiallyDownloadedBlock>(
              pit ? new PartiallyDownloadedBlock(config, &mempool) : nullptr)});
     state->nBlocksInFlight++;
     state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
     if (state->nBlocksInFlight == 1) {
         // We're starting a block download (batch) from this peer.
         state->nDownloadingSince = GetTimeMicros();
     }
 
     if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
         nPeersWithValidatedDownloads++;
     }
 
     itInFlight = mapBlocksInFlight
                      .insert(std::make_pair(hash, std::make_pair(nodeid, it)))
                      .first;
 
     if (pit) {
         *pit = &itInFlight->second.second;
     }
 
     return true;
 }
 
 /**
  * Check whether the last unknown block a peer advertised has since become
  * known, and if so update the peer's best known block.
  */
 void ProcessBlockAvailability(NodeId nodeid) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     if (!state->hashLastUnknownBlock.IsNull()) {
         BlockMap::iterator itOld =
             mapBlockIndex.find(state->hashLastUnknownBlock);
         if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
             if (state->pindexBestKnownBlock == nullptr ||
                 itOld->second->nChainWork >=
                     state->pindexBestKnownBlock->nChainWork) {
                 state->pindexBestKnownBlock = itOld->second;
             }
             state->hashLastUnknownBlock.SetNull();
         }
     }
 }
 
 /** Update tracking information about which blocks a peer is assumed to have. */
 void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     ProcessBlockAvailability(nodeid);
 
     BlockMap::iterator it = mapBlockIndex.find(hash);
     if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
         // An actually better block was announced.
         if (state->pindexBestKnownBlock == nullptr ||
             it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
             state->pindexBestKnownBlock = it->second;
         }
     } else {
         // An unknown block was announced; just assume that the latest one is
         // the best one.
         state->hashLastUnknownBlock = hash;
     }
 }
 
 void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman &connman) {
     AssertLockHeld(cs_main);
     CNodeState *nodestate = State(nodeid);
     if (!nodestate) {
         LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
         return;
     }
     if (!nodestate->fProvidesHeaderAndIDs) {
         return;
     }
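     // If this peer is already one of our compact-block announcers, move it to
     // the back of the list (most recently used) and return. Otherwise ask it
     // to start announcing via cmpctblock, first telling the oldest announcer
     // to stop if we already have 3 of them.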
     for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
          it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
         if (*it == nodeid) {
             lNodesAnnouncingHeaderAndIDs.erase(it);
             lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
             return;
         }
     }
     connman.ForNode(nodeid, [&connman](CNode *pfrom) {
         bool fAnnounceUsingCMPCTBLOCK = false;
         uint64_t nCMPCTBLOCKVersion = 1;
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
             // As per BIP152, we only get 3 of our peers to announce
             // blocks using compact encodings.
             connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(),
                             [&connman, fAnnounceUsingCMPCTBLOCK,
                              nCMPCTBLOCKVersion](CNode *pnodeStop) {
                                 connman.PushMessage(
                                     pnodeStop,
                                     CNetMsgMaker(pnodeStop->GetSendVersion())
                                         .Make(NetMsgType::SENDCMPCT,
                                               fAnnounceUsingCMPCTBLOCK,
                                               nCMPCTBLOCKVersion));
                                 return true;
                             });
             lNodesAnnouncingHeaderAndIDs.pop_front();
         }
         fAnnounceUsingCMPCTBLOCK = true;
         connman.PushMessage(pfrom,
                             CNetMsgMaker(pfrom->GetSendVersion())
                                 .Make(NetMsgType::SENDCMPCT,
                                       fAnnounceUsingCMPCTBLOCK,
                                       nCMPCTBLOCKVersion));
         lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
         return true;
     });
 }
 
 // Requires cs_main
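 // Direct fetching is only considered when our tip is within 20 block
 // intervals of the current network-adjusted time, i.e. we are nearly caught
 // up.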
 bool CanDirectFetch(const Consensus::Params &consensusParams) {
     return chainActive.Tip()->GetBlockTime() >
            GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
 }
 
 // Requires cs_main
 bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) {
     if (state->pindexBestKnownBlock &&
         pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
         return true;
     }
     if (state->pindexBestHeaderSent &&
         pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
         return true;
     }
     return false;
 }
 
 /**
  * Find the last common ancestor two blocks have.
  * Both pa and pb must be non null.
  */
 const CBlockIndex *LastCommonAncestor(const CBlockIndex *pa,
                                       const CBlockIndex *pb) {
     if (pa->nHeight > pb->nHeight) {
         pa = pa->GetAncestor(pb->nHeight);
     } else if (pb->nHeight > pa->nHeight) {
         pb = pb->GetAncestor(pa->nHeight);
     }
 
     while (pa != pb && pa && pb) {
         pa = pa->pprev;
         pb = pb->pprev;
     }
 
     // Eventually all chain branches meet at the genesis block.
     assert(pa == pb);
     return pa;
 }
 
 /** Update pindexLastCommonBlock and add not-in-flight missing successors to
  * vBlocks, until it has at most count entries. */
 void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
                               std::vector<const CBlockIndex *> &vBlocks,
                               NodeId &nodeStaller,
                               const Consensus::Params &consensusParams) {
     if (count == 0) {
         return;
     }
 
     vBlocks.reserve(vBlocks.size() + count);
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Make sure pindexBestKnownBlock is up to date, we'll need it.
     ProcessBlockAvailability(nodeid);
 
     if (state->pindexBestKnownBlock == nullptr ||
         state->pindexBestKnownBlock->nChainWork <
             chainActive.Tip()->nChainWork) {
         // This peer has nothing interesting.
         return;
     }
 
     if (state->pindexLastCommonBlock == nullptr) {
         // Bootstrap quickly by guessing a parent of our best tip is the forking
         // point. Guessing wrong in either direction is not a problem.
         state->pindexLastCommonBlock = chainActive[std::min(
             state->pindexBestKnownBlock->nHeight, chainActive.Height())];
     }
 
     // If the peer reorganized, our previous pindexLastCommonBlock may not be an
     // ancestor of its current tip anymore. Go back enough to fix that.
     state->pindexLastCommonBlock = LastCommonAncestor(
         state->pindexLastCommonBlock, state->pindexBestKnownBlock);
     if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
         return;
     }
 
     std::vector<const CBlockIndex *> vToFetch;
     const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
     // Never fetch further than the best block we know the peer has, or more
     // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
     // common with this peer. The +1 is so we can detect stalling, namely if we
     // would be able to download that next block if the window were 1 larger.
     int nWindowEnd =
         state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
     int nMaxHeight =
         std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
     NodeId waitingfor = -1;
     while (pindexWalk->nHeight < nMaxHeight) {
         // Read up to 128 (or more, if more blocks than that are needed)
         // successors of pindexWalk (towards pindexBestKnownBlock) into
         // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
         // expensive as iterating over ~100 CBlockIndex* entries anyway.
         int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                 std::max<int>(count - vBlocks.size(), 128));
         vToFetch.resize(nToFetch);
         pindexWalk = state->pindexBestKnownBlock->GetAncestor(
             pindexWalk->nHeight + nToFetch);
         vToFetch[nToFetch - 1] = pindexWalk;
         for (unsigned int i = nToFetch - 1; i > 0; i--) {
             vToFetch[i - 1] = vToFetch[i]->pprev;
         }
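         // vToFetch now holds nToFetch consecutive successors ending at
         // pindexWalk, in ascending height order (it was filled backwards via
         // pprev above).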
 
         // Iterate over those blocks in vToFetch (in forward direction), adding
         // the ones that are not yet downloaded and not in flight to vBlocks. In
         // the meantime, update pindexLastCommonBlock as long as all ancestors
         // are already downloaded, or if it's already part of our chain (and we
         // therefore don't need it even if pruned).
         for (const CBlockIndex *pindex : vToFetch) {
             if (!pindex->IsValid(BLOCK_VALID_TREE)) {
                 // We consider the chain that this peer is on invalid.
                 return;
             }
             if (pindex->nStatus & BLOCK_HAVE_DATA ||
                 chainActive.Contains(pindex)) {
                 if (pindex->nChainTx) {
                     state->pindexLastCommonBlock = pindex;
                 }
             } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                 // The block is not already downloaded, and not yet in flight.
                 if (pindex->nHeight > nWindowEnd) {
                     // We reached the end of the window.
                     if (vBlocks.size() == 0 && waitingfor != nodeid) {
                         // We aren't able to fetch anything, but we would be if
                         // the download window was one larger.
                         nodeStaller = waitingfor;
                     }
                     return;
                 }
                 vBlocks.push_back(pindex);
                 if (vBlocks.size() == count) {
                     return;
                 }
             } else if (waitingfor == -1) {
                 // This is the first already-in-flight block.
                 waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
             }
         }
     }
 }
 
 } // namespace
 
 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
     LOCK(cs_main);
     CNodeState *state = State(nodeid);
     if (state == nullptr) {
         return false;
     }
     stats.nMisbehavior = state->nMisbehavior;
     stats.nSyncHeight =
         state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
     stats.nCommonHeight = state->pindexLastCommonBlock
                               ? state->pindexLastCommonBlock->nHeight
                               : -1;
     for (const QueuedBlock &queue : state->vBlocksInFlight) {
         if (queue.pindex) {
             stats.vHeightInFlight.push_back(queue.pindex->nHeight);
         }
     }
     return true;
 }
 
 void RegisterNodeSignals(CNodeSignals &nodeSignals) {
     nodeSignals.ProcessMessages.connect(&ProcessMessages);
     nodeSignals.SendMessages.connect(&SendMessages);
     nodeSignals.InitializeNode.connect(&InitializeNode);
     nodeSignals.FinalizeNode.connect(&FinalizeNode);
 }
 
 void UnregisterNodeSignals(CNodeSignals &nodeSignals) {
     nodeSignals.ProcessMessages.disconnect(&ProcessMessages);
     nodeSignals.SendMessages.disconnect(&SendMessages);
     nodeSignals.InitializeNode.disconnect(&InitializeNode);
     nodeSignals.FinalizeNode.disconnect(&FinalizeNode);
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // mapOrphanTransactions
 //
 
 void AddToCompactExtraTransactions(const CTransactionRef &tx) {
     size_t max_extra_txn = GetArg("-blockreconstructionextratxn",
                                   DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
     if (max_extra_txn <= 0) {
         return;
     }
 
     if (!vExtraTxnForCompact.size()) {
         vExtraTxnForCompact.resize(max_extra_txn);
     }
 
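     // vExtraTxnForCompact is used as a fixed-size ring buffer: once it is
     // full, each new entry overwrites the oldest one.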
     vExtraTxnForCompact[vExtraTxnForCompactIt] =
         std::make_pair(tx->GetId(), tx);
     vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
 }
 
 bool AddOrphanTx(const CTransactionRef &tx, NodeId peer)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     const uint256 &txid = tx->GetId();
     if (mapOrphanTransactions.count(txid)) {
         return false;
     }
 
     // Ignore big transactions, to avoid a send-big-orphans memory exhaustion
     // attack. If a peer has a legitimate large transaction with a missing
     // parent then we assume it will rebroadcast it later, after the parent
     // transaction(s) have been mined or received.
     // 100 orphans, each of which is at most 99,999 bytes big, add up to at
     // most 10 megabytes of orphans and somewhat more for the by-prev index
     // (in the worst case):
     unsigned int sz = GetTransactionSize(*tx);
     if (sz >= MAX_STANDARD_TX_SIZE) {
         LogPrint(BCLog::MEMPOOL,
                  "ignoring large orphan tx (size: %u, hash: %s)\n", sz,
                  txid.ToString());
         return false;
     }
 
     auto ret = mapOrphanTransactions.emplace(
         txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
     assert(ret.second);
     for (const CTxIn &txin : tx->vin) {
         mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
     }
 
     AddToCompactExtraTransactions(tx);
 
     LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n",
              txid.ToString(), mapOrphanTransactions.size(),
              mapOrphanTransactionsByPrev.size());
     return true;
 }
 
 static int EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     std::map<uint256, COrphanTx>::iterator it =
         mapOrphanTransactions.find(hash);
     if (it == mapOrphanTransactions.end()) {
         return 0;
     }
     for (const CTxIn &txin : it->second.tx->vin) {
         auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
         if (itPrev == mapOrphanTransactionsByPrev.end()) {
             continue;
         }
         itPrev->second.erase(it);
         if (itPrev->second.empty()) {
             mapOrphanTransactionsByPrev.erase(itPrev);
         }
     }
     mapOrphanTransactions.erase(it);
     return 1;
 }
 
 void EraseOrphansFor(NodeId peer) {
     int nErased = 0;
     std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
     while (iter != mapOrphanTransactions.end()) {
         // Increment to avoid iterator becoming invalid.
         std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
         if (maybeErase->second.fromPeer == peer) {
             nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
         }
     }
     if (nErased > 0) {
         LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased,
                  peer);
     }
 }
 
 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     unsigned int nEvicted = 0;
     static int64_t nNextSweep;
     int64_t nNow = GetTime();
     if (nNextSweep <= nNow) {
         // Sweep out expired orphan pool entries:
         int nErased = 0;
         int64_t nMinExpTime =
             nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
         std::map<uint256, COrphanTx>::iterator iter =
             mapOrphanTransactions.begin();
         while (iter != mapOrphanTransactions.end()) {
             std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
             if (maybeErase->second.nTimeExpire <= nNow) {
                 nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
             } else {
                 nMinExpTime =
                     std::min(maybeErase->second.nTimeExpire, nMinExpTime);
             }
         }
         // Sweep again 5 minutes after the next entry that expires in order to
         // batch the linear scan.
         nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
         if (nErased > 0) {
             LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n",
                      nErased);
         }
     }
     while (mapOrphanTransactions.size() > nMaxOrphans) {
         // Evict a random orphan:
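         // lower_bound on a uniformly random hash selects a roughly uniformly
         // distributed entry; wrap around to begin() if the random value sorts
         // past the last key.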
         uint256 randomhash = GetRandHash();
         std::map<uint256, COrphanTx>::iterator it =
             mapOrphanTransactions.lower_bound(randomhash);
         if (it == mapOrphanTransactions.end()) {
             it = mapOrphanTransactions.begin();
         }
         EraseOrphanTx(it->first);
         ++nEvicted;
     }
     return nEvicted;
 }
 
 // Requires cs_main.
 void Misbehaving(NodeId pnode, int howmuch, const std::string &reason) {
     if (howmuch == 0) {
         return;
     }
 
     CNodeState *state = State(pnode);
     if (state == nullptr) {
         return;
     }
 
     state->nMisbehavior += howmuch;
     int banscore = GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
     if (state->nMisbehavior >= banscore &&
         state->nMisbehavior - howmuch < banscore) {
         LogPrintf(
             "%s: %s peer=%d (%d -> %d) reason: %s BAN THRESHOLD EXCEEDED\n",
             __func__, state->name, pnode, state->nMisbehavior - howmuch,
             state->nMisbehavior, reason.c_str());
         state->fShouldBan = true;
     } else {
         LogPrintf("%s: %s peer=%d (%d -> %d) reason: %s\n", __func__,
                   state->name, pnode, state->nMisbehavior - howmuch,
                   state->nMisbehavior, reason.c_str());
     }
 }
 
 // overloaded variant of above to operate on CNode*s
 static void Misbehaving(CNode *node, int howmuch, const std::string &reason) {
     Misbehaving(node->GetId(), howmuch, reason);
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // blockchain -> download logic notification
 //
 
 PeerLogicValidation::PeerLogicValidation(CConnman *connmanIn)
     : connman(connmanIn) {
     // Initialize global variables that cannot be constructed at startup.
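     // The parameters (120,000 elements, 1-in-a-million false positive rate,
     // ~1.3 MB) match the sizing rationale documented above for recentRejects.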
     recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
 }
 
-void PeerLogicValidation::SyncTransaction(const CTransaction &tx,
-                                          const CBlockIndex *pindex,
-                                          int nPosInBlock) {
-    if (nPosInBlock == CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK) {
-        return;
-    }
-
+void PeerLogicValidation::BlockConnected(
+    const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex,
+    const std::vector<CTransactionRef> &vtxConflicted) {
     LOCK(cs_main);
 
     std::vector<uint256> vOrphanErase;
-    // Which orphan pool entries must we evict?
-    for (size_t j = 0; j < tx.vin.size(); j++) {
-        auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
-        if (itByPrev == mapOrphanTransactionsByPrev.end()) {
-            continue;
-        }
-        for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end();
-             ++mi) {
-            const CTransaction &orphanTx = *(*mi)->second.tx;
-            const uint256 &orphanId = orphanTx.GetId();
-            vOrphanErase.push_back(orphanId);
+
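+    // A block transaction that spends the same prevout as an orphan either is
+    // that orphan (now included in the block) or conflicts with it; in both
+    // cases the orphan is collected here and erased below.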
+    for (const CTransactionRef &ptx : pblock->vtx) {
+        const CTransaction &tx = *ptx;
+
+        // Which orphan pool entries must we evict?
+        for (size_t j = 0; j < tx.vin.size(); j++) {
+            auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout);
+            if (itByPrev == mapOrphanTransactionsByPrev.end()) {
+                continue;
+            }
+
+            for (auto mi = itByPrev->second.begin();
+                 mi != itByPrev->second.end(); ++mi) {
+                const CTransaction &orphanTx = *(*mi)->second.tx;
+                const uint256 &orphanHash = orphanTx.GetHash();
+                vOrphanErase.push_back(orphanHash);
+            }
         }
     }
 
     // Erase orphan transactions included in or conflicted by this block
     if (vOrphanErase.size()) {
         int nErased = 0;
         for (uint256 &orphanId : vOrphanErase) {
             nErased += EraseOrphanTx(orphanId);
         }
         LogPrint(BCLog::MEMPOOL,
                  "Erased %d orphan tx included or conflicted by block\n",
                  nErased);
     }
 }
 
 static CCriticalSection cs_most_recent_block;
 static std::shared_ptr<const CBlock> most_recent_block;
 static std::shared_ptr<const CBlockHeaderAndShortTxIDs>
     most_recent_compact_block;
 static uint256 most_recent_block_hash;
 
 void PeerLogicValidation::NewPoWValidBlock(
     const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
         std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
     const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
 
     LOCK(cs_main);
 
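     // Fast-announce at most one block per height: if we have already
     // fast-announced at this height or above, skip it here.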
     static int nHighestFastAnnounce = 0;
     if (pindex->nHeight <= nHighestFastAnnounce) {
         return;
     }
     nHighestFastAnnounce = pindex->nHeight;
 
     uint256 hashBlock(pblock->GetHash());
 
     {
         LOCK(cs_most_recent_block);
         most_recent_block_hash = hashBlock;
         most_recent_block = pblock;
         most_recent_compact_block = pcmpctblock;
     }
 
     connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker,
                           &hashBlock](CNode *pnode) {
         // TODO: Avoid the repeated-serialization here
         if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) {
             return;
         }
         ProcessBlockAvailability(pnode->GetId());
         CNodeState &state = *State(pnode->GetId());
         // If the peer already has the previous block, or we have announced it
         // to them, but we don't think they have this one, go ahead and
         // announce it.
         if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) &&
             PeerHasHeader(&state, pindex->pprev)) {
 
             LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n",
                      "PeerLogicValidation::NewPoWValidBlock",
                      hashBlock.ToString(), pnode->id);
             connman->PushMessage(
                 pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
             state.pindexBestHeaderSent = pindex;
         }
     });
 }
 
 void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew,
                                           const CBlockIndex *pindexFork,
                                           bool fInitialDownload) {
     const int nNewHeight = pindexNew->nHeight;
     connman->SetBestHeight(nNewHeight);
 
     if (!fInitialDownload) {
         // Find the hashes of all blocks that weren't previously in the best
         // chain.
         std::vector<uint256> vHashes;
         const CBlockIndex *pindexToAnnounce = pindexNew;
         while (pindexToAnnounce != pindexFork) {
             vHashes.push_back(pindexToAnnounce->GetBlockHash());
             pindexToAnnounce = pindexToAnnounce->pprev;
             if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
                 // Limit announcements in case of a huge reorganization. Rely on
                 // the peer's synchronization mechanism in that case.
                 break;
             }
         }
         // Relay inventory, but don't relay old inventory during initial block
         // download.
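         // Only announce to a peer if the new tip is no more than ~2000 blocks
         // below the starting height that peer reported; anything older is
         // stale inventory for it.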
         connman->ForEachNode([nNewHeight, &vHashes](CNode *pnode) {
             if (nNewHeight > (pnode->nStartingHeight != -1
                                   ? pnode->nStartingHeight - 2000
                                   : 0)) {
                 for (const uint256 &hash : boost::adaptors::reverse(vHashes)) {
                     pnode->PushBlockHash(hash);
                 }
             }
         });
         connman->WakeMessageHandler();
     }
 
     nTimeBestReceived = GetTime();
 }
 
 void PeerLogicValidation::BlockChecked(const CBlock &block,
                                        const CValidationState &state) {
     LOCK(cs_main);
 
     const uint256 hash(block.GetHash());
     std::map<uint256, std::pair<NodeId, bool>>::iterator it =
         mapBlockSource.find(hash);
 
     int nDoS = 0;
     if (state.IsInvalid(nDoS)) {
         // Don't send reject message with code 0 or an internal reject code.
         if (it != mapBlockSource.end() && State(it->second.first) &&
             state.GetRejectCode() > 0 &&
             state.GetRejectCode() < REJECT_INTERNAL) {
             CBlockReject reject = {
                 uint8_t(state.GetRejectCode()),
                 state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH),
                 hash};
             State(it->second.first)->rejects.push_back(reject);
             if (nDoS > 0 && it->second.second) {
                 Misbehaving(it->second.first, nDoS, state.GetRejectReason());
             }
         }
     }
     // Check that:
     // 1. The block is valid
     // 2. We're not in initial block download
     // 3. This is currently the best block we're aware of. We haven't updated
     //    the tip yet so we have no way to check this directly here. Instead we
     //    just check that there are currently no other blocks in flight.
     else if (state.IsValid() && !IsInitialBlockDownload() &&
              mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
         if (it != mapBlockSource.end()) {
             MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, *connman);
         }
     }
 
     if (it != mapBlockSource.end()) {
         mapBlockSource.erase(it);
     }
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // Messages
 //
 
 static bool AlreadyHave(const CInv &inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     switch (inv.type) {
         case MSG_TX: {
             assert(recentRejects);
             if (chainActive.Tip()->GetBlockHash() !=
                 hashRecentRejectsChainTip) {
                 // If the chain tip has changed, previously rejected transactions
                 // might now be valid, e.g. due to a nLockTime'd tx becoming
                 // valid, or a double-spend. Reset the rejects filter and give
                 // those txs a second chance.
                 hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
                 recentRejects->reset();
             }
 
             // Use pcoinsTip->HaveCoinInCache as a quick approximation to
             // exclude requesting or processing some txs which have already been
             // included in a block. As this is best effort, we only check for
             // output 0 and 1. This works well enough in practice and we get
             // diminishing returns with 2 onward.
             return recentRejects->contains(inv.hash) ||
                    mempool.exists(inv.hash) ||
                    mapOrphanTransactions.count(inv.hash) ||
                    pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) ||
                    pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
         }
         case MSG_BLOCK:
             return mapBlockIndex.count(inv.hash);
     }
     // Don't know what it is, just say we already got one
     return true;
 }
 
 static void RelayTransaction(const CTransaction &tx, CConnman &connman) {
     CInv inv(MSG_TX, tx.GetId());
     connman.ForEachNode([&inv](CNode *pnode) { pnode->PushInventory(inv); });
 }
 
 static void RelayAddress(const CAddress &addr, bool fReachable,
                          CConnman &connman) {
     // Limited relaying of addresses outside our network(s)
     unsigned int nRelayNodes = fReachable ? 2 : 1;
 
     // Relay to a limited number of other nodes.
     // Use deterministic randomness to send to the same nodes for 24 hours at a
     // time so the addrKnowns of the chosen nodes prevent repeats.
     uint64_t hashAddr = addr.GetHash();
     const CSipHasher hasher =
         connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
             .Write(hashAddr << 32)
             .Write((GetTime() + hashAddr) / (24 * 60 * 60));
     FastRandomContext insecure_rand;
 
     std::array<std::pair<uint64_t, CNode *>, 2> best{
         {{0, nullptr}, {0, nullptr}}};
     assert(nRelayNodes <= best.size());
 
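     // sortfunc keeps, in best[], the up to nRelayNodes peers with the highest
     // per-peer hash keys (in descending order); pushfunc then relays the
     // address to exactly those peers.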
     auto sortfunc = [&best, &hasher, nRelayNodes](CNode *pnode) {
         if (pnode->nVersion >= CADDR_TIME_VERSION) {
             uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize();
             for (unsigned int i = 0; i < nRelayNodes; i++) {
                 if (hashKey > best[i].first) {
                     std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
                               best.begin() + i + 1);
                     best[i] = std::make_pair(hashKey, pnode);
                     break;
                 }
             }
         }
     };
 
     auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
         for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
             best[i].second->PushAddress(addr, insecure_rand);
         }
     };
 
     connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
 }
 
 static void ProcessGetData(const Config &config, CNode *pfrom,
                            const Consensus::Params &consensusParams,
                            CConnman &connman,
                            const std::atomic<bool> &interruptMsgProc) {
     std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
     std::vector<CInv> vNotFound;
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
     LOCK(cs_main);
 
     while (it != pfrom->vRecvGetData.end()) {
         // Don't bother if send buffer is too full to respond anyway.
         if (pfrom->fPauseSend) {
             break;
         }
 
         const CInv &inv = *it;
         {
             if (interruptMsgProc) {
                 return;
             }
 
             it++;
 
             if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK ||
                 inv.type == MSG_CMPCT_BLOCK) {
                 bool send = false;
                 BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
                 if (mi != mapBlockIndex.end()) {
                     if (mi->second->nChainTx &&
                         !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
                         mi->second->IsValid(BLOCK_VALID_TREE)) {
                         // If we have the block and all of its parents, but have
                         // not yet validated it, we might be in the middle of
                         // connecting it (ie in the unlock of cs_main before
                         // ActivateBestChain but after AcceptBlock). In this
                         // case, we need to run ActivateBestChain prior to
                         // checking the relay conditions below.
                         std::shared_ptr<const CBlock> a_recent_block;
                         {
                             LOCK(cs_most_recent_block);
                             a_recent_block = most_recent_block;
                         }
                         CValidationState dummy;
                         ActivateBestChain(config, dummy, a_recent_block);
                     }
                     if (chainActive.Contains(mi->second)) {
                         send = true;
                     } else {
                         static const int nOneMonth = 30 * 24 * 60 * 60;
                         // To prevent fingerprinting attacks, only send blocks
                         // outside of the active chain if they are valid, and no
                         // more than a month older (both in time, and in best
                         // equivalent proof of work) than the best header chain
                         // we know about.
                         send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
                                (pindexBestHeader != nullptr) &&
                                (pindexBestHeader->GetBlockTime() -
                                     mi->second->GetBlockTime() <
                                 nOneMonth) &&
                                (GetBlockProofEquivalentTime(
                                     *pindexBestHeader, *mi->second,
                                     *pindexBestHeader,
                                     consensusParams) < nOneMonth);
                         if (!send) {
                             LogPrintf("%s: ignoring request from peer=%i for "
                                       "old block that isn't in the main "
                                       "chain\n",
                                       __func__, pfrom->GetId());
                         }
                     }
                 }
 
                 // Disconnect the node if we have reached the outbound limit
                 // for serving historical blocks, but never disconnect
                 // whitelisted nodes.
                 // Assume > 1 week old = historical.
                 static const int nOneWeek = 7 * 24 * 60 * 60;
                 if (send && connman.OutboundTargetReached(true) &&
                     (((pindexBestHeader != nullptr) &&
                       (pindexBestHeader->GetBlockTime() -
                            mi->second->GetBlockTime() >
                        nOneWeek)) ||
                      inv.type == MSG_FILTERED_BLOCK) &&
                     !pfrom->fWhitelisted) {
                     LogPrint(BCLog::NET, "historical block serving limit "
                                          "reached, disconnect peer=%d\n",
                              pfrom->GetId());
 
                     // disconnect node
                     pfrom->fDisconnect = true;
                     send = false;
                 }
                 // Pruned nodes may have deleted the block, so check whether
                 // it's available before trying to send.
                 if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) {
                     // Send block from disk
                     CBlock block;
                     if (!ReadBlockFromDisk(block, (*mi).second, config)) {
                         assert(!"cannot load block from disk");
                     }
 
                     if (inv.type == MSG_BLOCK) {
                         connman.PushMessage(
                             pfrom, msgMaker.Make(NetMsgType::BLOCK, block));
                     } else if (inv.type == MSG_FILTERED_BLOCK) {
                         bool sendMerkleBlock = false;
                         CMerkleBlock merkleBlock;
                         {
                             LOCK(pfrom->cs_filter);
                             if (pfrom->pfilter) {
                                 sendMerkleBlock = true;
                                 merkleBlock =
                                     CMerkleBlock(block, *pfrom->pfilter);
                             }
                         }
                         if (sendMerkleBlock) {
                             connman.PushMessage(
                                 pfrom,
                                 msgMaker.Make(NetMsgType::MERKLEBLOCK,
                                               merkleBlock));
                             // CMerkleBlock just contains hashes, so also push
                             // any transactions in the block the client did not
                             // see. This avoids hurting performance by
                             // pointlessly requiring a round-trip. Note that
                             // there is currently no way for a node to request
                             // any single transactions we didn't send here -
                             // they must either disconnect and retry or request
                             // the full block. Thus, the protocol spec allows
                             // us to provide duplicate txn here; however, we
                             // MUST always provide at least what the remote
                             // peer needs.
                             typedef std::pair<unsigned int, uint256> PairType;
                             for (PairType &pair : merkleBlock.vMatchedTxn) {
                                 connman.PushMessage(
                                     pfrom,
                                     msgMaker.Make(NetMsgType::TX,
                                                   *block.vtx[pair.first]));
                             }
                         }
                         // else
                         // no response
                     } else if (inv.type == MSG_CMPCT_BLOCK) {
                         // If a peer is asking for old blocks, we're almost
                         // guaranteed they won't have a useful mempool to match
                         // against a compact block, and we don't feel like
                         // constructing the object for them, so instead we
                         // respond with the full, non-compact block.
                         int nSendFlags = 0;
                         if (CanDirectFetch(consensusParams) &&
                             mi->second->nHeight >=
                                 chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
                             CBlockHeaderAndShortTxIDs cmpctblock(block);
                             connman.PushMessage(
                                 pfrom,
                                 msgMaker.Make(nSendFlags,
                                               NetMsgType::CMPCTBLOCK,
                                               cmpctblock));
                         } else {
                             connman.PushMessage(pfrom,
                                                 msgMaker.Make(nSendFlags,
                                                               NetMsgType::BLOCK,
                                                               block));
                         }
                     }
 
                     // Trigger the peer node to send a getblocks request for the
                     // next batch of inventory.
                     if (inv.hash == pfrom->hashContinue) {
                         // Bypass PushInventory, this must send even if
                         // redundant, and we want it right after the last block
                         // so they don't wait for other stuff first.
                         std::vector<CInv> vInv;
                         vInv.push_back(
                             CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
                         connman.PushMessage(
                             pfrom, msgMaker.Make(NetMsgType::INV, vInv));
                         pfrom->hashContinue.SetNull();
                     }
                 }
             } else if (inv.type == MSG_TX) {
                 // Send stream from relay memory
                 bool push = false;
                 auto mi = mapRelay.find(inv.hash);
                 int nSendFlags = 0;
                 if (mi != mapRelay.end()) {
                     connman.PushMessage(
                         pfrom,
                         msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
                     push = true;
                 } else if (pfrom->timeLastMempoolReq) {
                     auto txinfo = mempool.info(inv.hash);
                     // To protect privacy, do not answer getdata using the
                     // mempool when that TX couldn't have been INVed in reply to
                     // a MEMPOOL request.
                     if (txinfo.tx &&
                         txinfo.nTime <= pfrom->timeLastMempoolReq) {
                         connman.PushMessage(pfrom,
                                             msgMaker.Make(nSendFlags,
                                                           NetMsgType::TX,
                                                           *txinfo.tx));
                         push = true;
                     }
                 }
                 if (!push) {
                     vNotFound.push_back(inv);
                 }
             }
 
             // Track requests for our stuff.
             GetMainSignals().Inventory(inv.hash);
 
             if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK ||
                 inv.type == MSG_CMPCT_BLOCK) {
                 break;
             }
         }
     }
 
     pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
 
     if (!vNotFound.empty()) {
         // Let the peer know that we didn't find what it asked for, so it
         // doesn't have to wait around forever. Currently only SPV clients
         // actually care about this message: it's needed when they are
         // recursively walking the dependencies of relevant unconfirmed
         // transactions. SPV clients want to do that because they want to know
         // about (and store and rebroadcast and risk analyze) the dependencies
         // of transactions relevant to them, without having to download the
         // entire memory pool.
         connman.PushMessage(pfrom,
                             msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
     }
 }
 
 uint32_t GetFetchFlags(CNode *pfrom, const CBlockIndex *pprev,
                        const Consensus::Params &chainparams) {
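    // No extra fetch flags are currently defined in this codebase; the
    // parameters appear to be kept only so call sites stay uniform.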
     uint32_t nFetchFlags = 0;
     return nFetchFlags;
 }
 
 inline static void SendBlockTransactions(const CBlock &block,
                                          const BlockTransactionsRequest &req,
                                          CNode *pfrom, CConnman &connman) {
     BlockTransactions resp(req);
     for (size_t i = 0; i < req.indexes.size(); i++) {
         if (req.indexes[i] >= block.vtx.size()) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "out-of-bound-tx-index");
            LogPrintf(
                "Peer %d sent us a getblocktxn with out-of-bounds tx indices\n",
                pfrom->id);
             return;
         }
         resp.txn[i] = block.vtx[req.indexes[i]];
     }
     LOCK(cs_main);
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
     int nSendFlags = 0;
     connman.PushMessage(pfrom,
                         msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
 }
 
 static bool ProcessMessage(const Config &config, CNode *pfrom,
                            const std::string &strCommand, CDataStream &vRecv,
                            int64_t nTimeReceived,
                            const CChainParams &chainparams, CConnman &connman,
                            const std::atomic<bool> &interruptMsgProc) {
     LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n",
              SanitizeString(strCommand), vRecv.size(), pfrom->id);
     if (IsArgSet("-dropmessagestest") &&
         GetRand(GetArg("-dropmessagestest", 0)) == 0) {
         LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
         return true;
     }
 
     if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
         (strCommand == NetMsgType::FILTERLOAD ||
          strCommand == NetMsgType::FILTERADD)) {
         if (pfrom->nVersion >= NO_BLOOM_VERSION) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "no-bloom-version");
             return false;
         } else {
             pfrom->fDisconnect = true;
             return false;
         }
     }
 
     if (strCommand == NetMsgType::REJECT) {
         if (LogAcceptCategory(BCLog::NET)) {
             try {
                 std::string strMsg;
                 uint8_t ccode;
                 std::string strReason;
                 vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >>
                     ccode >>
                     LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
 
                 std::ostringstream ss;
                 ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
 
                 if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX) {
                     uint256 hash;
                     vRecv >> hash;
                     ss << ": hash " << hash.ToString();
                 }
                 LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
             } catch (const std::ios_base::failure &) {
                 // Avoid feedback loops by preventing reject messages from
                 // triggering a new reject message.
                 LogPrint(BCLog::NET, "Unparseable reject message received\n");
             }
         }
     }
 
     else if (strCommand == NetMsgType::VERSION) {
         // Each connection can only send one version message
         if (pfrom->nVersion != 0) {
             connman.PushMessage(
                 pfrom,
                 CNetMsgMaker(INIT_PROTO_VERSION)
                     .Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE,
                           std::string("Duplicate version message")));
             LOCK(cs_main);
             Misbehaving(pfrom, 1, "multiple-version");
             return false;
         }
 
         int64_t nTime;
         CAddress addrMe;
         CAddress addrFrom;
         uint64_t nNonce = 1;
         uint64_t nServiceInt;
         ServiceFlags nServices;
         int nVersion;
         int nSendVersion;
         std::string strSubVer;
         std::string cleanSubVer;
         int nStartingHeight = -1;
         bool fRelay = true;
 
         vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
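        // Send our replies using the lower of the peer's protocol version and
        // ours.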
         nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
         nServices = ServiceFlags(nServiceInt);
         if (!pfrom->fInbound) {
             connman.SetServices(pfrom->addr, nServices);
         }
         if (pfrom->nServicesExpected & ~nServices) {
             LogPrint(BCLog::NET, "peer=%d does not offer the expected services "
                                  "(%08x offered, %08x expected); "
                                  "disconnecting\n",
                      pfrom->id, nServices, pfrom->nServicesExpected);
             connman.PushMessage(
                 pfrom,
                 CNetMsgMaker(INIT_PROTO_VERSION)
                     .Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
                           strprintf("Expected to offer services %08x",
                                     pfrom->nServicesExpected)));
             pfrom->fDisconnect = true;
             return false;
         }
 
         if (nVersion < MIN_PEER_PROTO_VERSION) {
             // disconnect from peers older than this proto version
             LogPrintf("peer=%d using obsolete version %i; disconnecting\n",
                       pfrom->id, nVersion);
             connman.PushMessage(
                 pfrom,
                 CNetMsgMaker(INIT_PROTO_VERSION)
                     .Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
                           strprintf("Version must be %d or greater",
                                     MIN_PEER_PROTO_VERSION)));
             pfrom->fDisconnect = true;
             return false;
         }
 
         if (!vRecv.empty()) {
             vRecv >> addrFrom >> nNonce;
         }
         if (!vRecv.empty()) {
             vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
             cleanSubVer = SanitizeString(strSubVer);
         }
         if (!vRecv.empty()) {
             vRecv >> nStartingHeight;
         }
         if (!vRecv.empty()) {
             vRecv >> fRelay;
         }
         // Disconnect if we connected to ourself
         if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce)) {
             LogPrintf("connected to self at %s, disconnecting\n",
                       pfrom->addr.ToString());
             pfrom->fDisconnect = true;
             return true;
         }
 
         if (pfrom->fInbound && addrMe.IsRoutable()) {
             SeenLocal(addrMe);
         }
 
        // Be shy and don't send our version until we hear theirs first.
         if (pfrom->fInbound) {
             PushNodeVersion(config, pfrom, connman, GetAdjustedTime());
         }
 
         connman.PushMessage(
             pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
 
         pfrom->nServices = nServices;
         pfrom->SetAddrLocal(addrMe);
         {
             LOCK(pfrom->cs_SubVer);
             pfrom->strSubVer = strSubVer;
             pfrom->cleanSubVer = cleanSubVer;
         }
         pfrom->nStartingHeight = nStartingHeight;
         pfrom->fClient = !(nServices & NODE_NETWORK);
         {
             LOCK(pfrom->cs_filter);
            // If fRelay is false, fRelayTxes is only set to true after we
            // receive the first filter* message.
             pfrom->fRelayTxes = fRelay;
         }
 
         // Change version
         pfrom->SetSendVersion(nSendVersion);
         pfrom->nVersion = nVersion;
 
         // Potentially mark this peer as a preferred download peer.
         {
             LOCK(cs_main);
             UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
         }
 
         if (!pfrom->fInbound) {
             // Advertise our address
             if (fListen && !IsInitialBlockDownload()) {
                 CAddress addr =
                     GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
                 FastRandomContext insecure_rand;
                 if (addr.IsRoutable()) {
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     pfrom->PushAddress(addr, insecure_rand);
                 } else if (IsPeerAddrLocalGood(pfrom)) {
                     addr.SetIP(addrMe);
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     pfrom->PushAddress(addr, insecure_rand);
                 }
             }
 
             // Get recent addresses
             if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION ||
                 connman.GetAddressCount() < 1000) {
                 connman.PushMessage(
                     pfrom,
                     CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
                 pfrom->fGetAddr = true;
             }
             connman.MarkAddressGood(pfrom->addr);
         }
 
         std::string remoteAddr;
         if (fLogIPs) {
             remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
         }
 
         LogPrintf("receive version message: [%s] %s: version %d, blocks=%d, "
                   "us=%s, peer=%d%s\n",
                   pfrom->addr.ToString().c_str(), cleanSubVer, pfrom->nVersion,
                   pfrom->nStartingHeight, addrMe.ToString(), pfrom->id,
                   remoteAddr);
 
         int64_t nTimeOffset = nTime - GetTime();
         pfrom->nTimeOffset = nTimeOffset;
         AddTimeData(pfrom->addr, nTimeOffset);
 
         // If the peer is old enough to have the old alert system, send it the
         // final alert.
         if (pfrom->nVersion <= 70012) {
             CDataStream finalAlert(
                 ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffef"
                          "fff7f01ffffff7f00000000ffffff7f00ffffff7f002f55524745"
                          "4e543a20416c657274206b657920636f6d70726f6d697365642c2"
                          "075706772616465207265717569726564004630440220653febd6"
                          "410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3ab"
                          "d5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fec"
                          "aae66ecf689bf71b50"),
                 SER_NETWORK, PROTOCOL_VERSION);
             connman.PushMessage(
                 pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
         }
 
         // Feeler connections exist only to verify if address is online.
         if (pfrom->fFeeler) {
             assert(pfrom->fInbound == false);
             pfrom->fDisconnect = true;
         }
         return true;
     }
 
     else if (pfrom->nVersion == 0) {
         // Must have a version message before anything else
         LOCK(cs_main);
         Misbehaving(pfrom, 1, "missing-version");
         return false;
     }
 
     // At this point, the outgoing message serialization version can't change.
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
 
     if (strCommand == NetMsgType::VERACK) {
         pfrom->SetRecvVersion(
             std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
 
         if (!pfrom->fInbound) {
             // Mark this node as currently connected, so we update its timestamp
             // later.
             LOCK(cs_main);
             State(pfrom->GetId())->fCurrentlyConnected = true;
         }
 
         if (pfrom->nVersion >= SENDHEADERS_VERSION) {
             // Tell our peer we prefer to receive headers rather than inv's
             // We send this to non-NODE NETWORK peers as well, because even
             // non-NODE NETWORK peers can announce blocks (such as pruning
             // nodes)
             connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
         }
         if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
            // Tell our peer we are willing to provide version 1 cmpctblocks.
            // However, we do not request new block announcements using
            // cmpctblock messages. We send this to non-NODE NETWORK peers as
            // well, because they may wish to request compact blocks from us.
             bool fAnnounceUsingCMPCTBLOCK = false;
             uint64_t nCMPCTBLOCKVersion = 1;
             connman.PushMessage(pfrom,
                                 msgMaker.Make(NetMsgType::SENDCMPCT,
                                               fAnnounceUsingCMPCTBLOCK,
                                               nCMPCTBLOCKVersion));
         }
         pfrom->fSuccessfullyConnected = true;
     }
 
     else if (!pfrom->fSuccessfullyConnected) {
         // Must have a verack message before anything else
         LOCK(cs_main);
         Misbehaving(pfrom, 1, "missing-verack");
         return false;
     }
 
     else if (strCommand == NetMsgType::ADDR) {
         std::vector<CAddress> vAddr;
         vRecv >> vAddr;
 
         // Don't want addr from older versions unless seeding
         if (pfrom->nVersion < CADDR_TIME_VERSION &&
             connman.GetAddressCount() > 1000) {
             return true;
         }
         if (vAddr.size() > 1000) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "oversized-addr");
             return error("message addr size() = %u", vAddr.size());
         }
 
         // Store the new addresses
         std::vector<CAddress> vAddrOk;
         int64_t nNow = GetAdjustedTime();
         int64_t nSince = nNow - 10 * 60;
         for (CAddress &addr : vAddr) {
             if (interruptMsgProc) {
                 return true;
             }
 
             if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES) {
                 continue;
             }
 
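            // Sanitize obviously bogus timestamps: anything earlier than
            // ~2001 (100000000) or more than 10 minutes in the future is
            // treated as 5 days old, so it won't be relayed as fresh below.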
             if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) {
                 addr.nTime = nNow - 5 * 24 * 60 * 60;
             }
             pfrom->AddAddressKnown(addr);
             bool fReachable = IsReachable(addr);
             if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 &&
                 addr.IsRoutable()) {
                 // Relay to a limited number of other nodes
                 RelayAddress(addr, fReachable, connman);
             }
             // Do not store addresses outside our network
             if (fReachable) {
                 vAddrOk.push_back(addr);
             }
         }
         connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
         if (vAddr.size() < 1000) {
             pfrom->fGetAddr = false;
         }
         if (pfrom->fOneShot) {
             pfrom->fDisconnect = true;
         }
     }
 
     else if (strCommand == NetMsgType::SENDHEADERS) {
         LOCK(cs_main);
         State(pfrom->GetId())->fPreferHeaders = true;
     }
 
     else if (strCommand == NetMsgType::SENDCMPCT) {
         bool fAnnounceUsingCMPCTBLOCK = false;
         uint64_t nCMPCTBLOCKVersion = 0;
         vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
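        // Only version 1 compact blocks are handled here; announcements for
        // any other version are silently ignored.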
         if (nCMPCTBLOCKVersion == 1) {
             LOCK(cs_main);
            // fProvidesHeaderAndIDs is used to "lock in" the version of
            // compact blocks we send.
             if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
                 State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
             }
 
             State(pfrom->GetId())->fPreferHeaderAndIDs =
                 fAnnounceUsingCMPCTBLOCK;
             if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
                 State(pfrom->GetId())->fSupportsDesiredCmpctVersion = true;
             }
         }
     }
 
     else if (strCommand == NetMsgType::INV) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "oversized-inv");
             return error("message inv size() = %u", vInv.size());
         }
 
         bool fBlocksOnly = !fRelayTxes;
 
         // Allow whitelisted peers to send data other than blocks in blocks only
         // mode if whitelistrelay is true
         if (pfrom->fWhitelisted &&
             GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) {
             fBlocksOnly = false;
         }
 
         LOCK(cs_main);
 
         uint32_t nFetchFlags =
             GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus());
 
         std::vector<CInv> vToFetch;
 
         for (size_t nInv = 0; nInv < vInv.size(); nInv++) {
             CInv &inv = vInv[nInv];
 
             if (interruptMsgProc) {
                 return true;
             }
 
             bool fAlreadyHave = AlreadyHave(inv);
             LogPrint(BCLog::NET, "got inv: %s  %s peer=%d\n", inv.ToString(),
                      fAlreadyHave ? "have" : "new", pfrom->id);
 
             if (inv.type == MSG_TX) {
                 inv.type |= nFetchFlags;
             }
 
             if (inv.type == MSG_BLOCK) {
                 UpdateBlockAvailability(pfrom->GetId(), inv.hash);
                 if (!fAlreadyHave && !fImporting && !fReindex &&
                     !mapBlocksInFlight.count(inv.hash)) {
                     // We used to request the full block here, but since
                     // headers-announcements are now the primary method of
                     // announcement on the network, and since, in the case that
                     // a node fell back to inv we probably have a reorg which we
                     // should get the headers for first, we now only provide a
                     // getheaders response here. When we receive the headers, we
                     // will then ask for the blocks we need.
                     connman.PushMessage(
                         pfrom,
                         msgMaker.Make(NetMsgType::GETHEADERS,
                                       chainActive.GetLocator(pindexBestHeader),
                                       inv.hash));
                     LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
                              pindexBestHeader->nHeight, inv.hash.ToString(),
                              pfrom->id);
                 }
             } else {
                 pfrom->AddInventoryKnown(inv);
                 if (fBlocksOnly) {
                     LogPrint(BCLog::NET, "transaction (%s) inv sent in "
                                          "violation of protocol peer=%d\n",
                              inv.hash.ToString(), pfrom->id);
                 } else if (!fAlreadyHave && !fImporting && !fReindex &&
                            !IsInitialBlockDownload()) {
                     pfrom->AskFor(inv);
                 }
             }
 
             // Track requests for our stuff
             GetMainSignals().Inventory(inv.hash);
         }
 
         if (!vToFetch.empty()) {
             connman.PushMessage(pfrom,
                                 msgMaker.Make(NetMsgType::GETDATA, vToFetch));
         }
     }
 
     else if (strCommand == NetMsgType::GETDATA) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "too-many-inv");
             return error("message getdata size() = %u", vInv.size());
         }
 
         LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
                  vInv.size(), pfrom->id);
 
         if (vInv.size() > 0) {
             LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
                      vInv[0].ToString(), pfrom->id);
         }
 
         pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(),
                                    vInv.end());
         ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman,
                        interruptMsgProc);
     }
 
     else if (strCommand == NetMsgType::GETBLOCKS) {
         CBlockLocator locator;
         uint256 hashStop;
         vRecv >> locator >> hashStop;
 
         // We might have announced the currently-being-connected tip using a
         // compact block, which resulted in the peer sending a getblocks
         // request, which we would otherwise respond to without the new block.
         // To avoid this situation we simply verify that we are on our best
         // known chain now. This is super overkill, but we handle it better
         // for getheaders requests, and there are no known nodes which support
         // compact blocks but still use getblocks to request blocks.
         {
             std::shared_ptr<const CBlock> a_recent_block;
             {
                 LOCK(cs_most_recent_block);
                 a_recent_block = most_recent_block;
             }
             CValidationState dummy;
             ActivateBestChain(config, dummy, a_recent_block);
         }
 
         LOCK(cs_main);
 
         // Find the last block the caller has in the main chain
         const CBlockIndex *pindex = FindForkInGlobalIndex(chainActive, locator);
 
         // Send the rest of the chain
         if (pindex) {
             pindex = chainActive.Next(pindex);
         }
         int nLimit = 500;
         LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
                  pfrom->id);
         for (; pindex; pindex = chainActive.Next(pindex)) {
             if (pindex->GetBlockHash() == hashStop) {
                 LogPrint(BCLog::NET, "  getblocks stopping at %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
            // If pruning, don't inv blocks unless we have them on disk and are
            // likely to still have them for some reasonable time window
            // (1 hour) that block relay might require.
             const int nPrunedBlocksLikelyToHave =
                 MIN_BLOCKS_TO_KEEP -
                 3600 / chainparams.GetConsensus().nPowTargetSpacing;
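            // For example, assuming mainnet parameters (MIN_BLOCKS_TO_KEEP =
            // 288, 10-minute target spacing), this works out to 288 - 6 = 282
            // blocks of margin.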
             if (fPruneMode &&
                 (!(pindex->nStatus & BLOCK_HAVE_DATA) ||
                  pindex->nHeight <=
                      chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave)) {
                 LogPrint(
                     BCLog::NET,
                     " getblocks stopping, pruned or too old block at %d %s\n",
                     pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
             if (--nLimit <= 0) {
                 // When this block is requested, we'll send an inv that'll
                 // trigger the peer to getblocks the next batch of inventory.
                 LogPrint(BCLog::NET, "  getblocks stopping at limit %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 pfrom->hashContinue = pindex->GetBlockHash();
                 break;
             }
         }
     }
 
     else if (strCommand == NetMsgType::GETBLOCKTXN) {
         BlockTransactionsRequest req;
         vRecv >> req;
 
         std::shared_ptr<const CBlock> recent_block;
         {
             LOCK(cs_most_recent_block);
             if (most_recent_block_hash == req.blockhash) {
                 recent_block = most_recent_block;
             }
             // Unlock cs_most_recent_block to avoid cs_main lock inversion
         }
         if (recent_block) {
             SendBlockTransactions(*recent_block, req, pfrom, connman);
             return true;
         }
 
         LOCK(cs_main);
 
         BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
         if (it == mapBlockIndex.end() ||
             !(it->second->nStatus & BLOCK_HAVE_DATA)) {
            LogPrintf(
                "Peer %d sent us a getblocktxn for a block we don't have\n",
                pfrom->id);
             return true;
         }
 
         if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
             // If an older block is requested (should never happen in practice,
             // but can happen in tests) send a block response instead of a
             // blocktxn response. Sending a full block response instead of a
             // small blocktxn response is preferable in the case where a peer
             // might maliciously send lots of getblocktxn requests to trigger
             // expensive disk reads, because it will require the peer to
             // actually receive all the data read from disk over the network.
            LogPrint(BCLog::NET,
                     "Peer %d sent us a getblocktxn for a block > %i deep\n",
                     pfrom->id, MAX_BLOCKTXN_DEPTH);
             CInv inv;
             inv.type = MSG_BLOCK;
             inv.hash = req.blockhash;
             pfrom->vRecvGetData.push_back(inv);
             ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman,
                            interruptMsgProc);
             return true;
         }
 
         CBlock block;
         bool ret = ReadBlockFromDisk(block, it->second, config);
         assert(ret);
 
         SendBlockTransactions(block, req, pfrom, connman);
     }
 
     else if (strCommand == NetMsgType::GETHEADERS) {
         CBlockLocator locator;
         uint256 hashStop;
         vRecv >> locator >> hashStop;
 
         LOCK(cs_main);
         if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
             LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because "
                                  "node is in initial block download\n",
                      pfrom->id);
             return true;
         }
 
         CNodeState *nodestate = State(pfrom->GetId());
         const CBlockIndex *pindex = nullptr;
         if (locator.IsNull()) {
             // If locator is null, return the hashStop block
             BlockMap::iterator mi = mapBlockIndex.find(hashStop);
             if (mi == mapBlockIndex.end()) {
                 return true;
             }
             pindex = (*mi).second;
         } else {
             // Find the last block the caller has in the main chain
             pindex = FindForkInGlobalIndex(chainActive, locator);
             if (pindex) {
                 pindex = chainActive.Next(pindex);
             }
         }
 
         // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
         // count at the end
         std::vector<CBlock> vHeaders;
         int nLimit = MAX_HEADERS_RESULTS;
         LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->id);
         for (; pindex; pindex = chainActive.Next(pindex)) {
             vHeaders.push_back(pindex->GetBlockHeader());
             if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
                 break;
             }
         }
         // pindex can be nullptr either if we sent chainActive.Tip() OR
         // if our peer has chainActive.Tip() (and thus we are sending an empty
         // headers message). In both cases it's safe to update
         // pindexBestHeaderSent to be our tip.
         //
         // It is important that we simply reset the BestHeaderSent value here,
         // and not max(BestHeaderSent, newHeaderSent). We might have announced
         // the currently-being-connected tip using a compact block, which
         // resulted in the peer sending a headers request, which we respond to
         // without the new block. By resetting the BestHeaderSent, we ensure we
         // will re-announce the new block via headers (or compact blocks again)
         // in the SendMessages logic.
         nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
         connman.PushMessage(pfrom,
                             msgMaker.Make(NetMsgType::HEADERS, vHeaders));
     }
 
     else if (strCommand == NetMsgType::TX) {
        // Stop processing the transaction early if we are in blocks-only mode
        // and the peer is either not whitelisted or whitelistrelay is off.
         if (!fRelayTxes &&
             (!pfrom->fWhitelisted ||
              !GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))) {
             LogPrint(BCLog::NET,
                      "transaction sent in violation of protocol peer=%d\n",
                      pfrom->id);
             return true;
         }
 
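        // vWorkQueue holds the outpoints of newly accepted transactions whose
        // orphan descendants should be retried; vEraseQueue collects orphans
        // to remove once the loop below is done.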
         std::deque<COutPoint> vWorkQueue;
         std::vector<uint256> vEraseQueue;
         CTransactionRef ptx;
         vRecv >> ptx;
         const CTransaction &tx = *ptx;
 
         CInv inv(MSG_TX, tx.GetId());
         pfrom->AddInventoryKnown(inv);
 
         LOCK(cs_main);
 
         bool fMissingInputs = false;
         CValidationState state;
 
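        // We now have the transaction, so stop tracking any outstanding
        // requests for it.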
         pfrom->setAskFor.erase(inv.hash);
         mapAlreadyAskedFor.erase(inv.hash);
 
         std::list<CTransactionRef> lRemovedTxn;
 
         if (!AlreadyHave(inv) &&
             AcceptToMemoryPool(config, mempool, state, ptx, true,
                                &fMissingInputs, &lRemovedTxn)) {
             mempool.check(pcoinsTip);
             RelayTransaction(tx, connman);
             for (size_t i = 0; i < tx.vout.size(); i++) {
                 vWorkQueue.emplace_back(inv.hash, i);
             }
 
             pfrom->nLastTXTime = GetTime();
 
             LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s "
                                      "(poolsz %u txn, %u kB)\n",
                      pfrom->id, tx.GetId().ToString(), mempool.size(),
                      mempool.DynamicMemoryUsage() / 1000);
 
             // Recursively process any orphan transactions that depended on this
             // one
             std::set<NodeId> setMisbehaving;
             while (!vWorkQueue.empty()) {
                 auto itByPrev =
                     mapOrphanTransactionsByPrev.find(vWorkQueue.front());
                 vWorkQueue.pop_front();
                 if (itByPrev == mapOrphanTransactionsByPrev.end()) {
                     continue;
                 }
                 for (auto mi = itByPrev->second.begin();
                      mi != itByPrev->second.end(); ++mi) {
                     const CTransactionRef &porphanTx = (*mi)->second.tx;
                     const CTransaction &orphanTx = *porphanTx;
                     const uint256 &orphanId = orphanTx.GetId();
                     NodeId fromPeer = (*mi)->second.fromPeer;
                     bool fMissingInputs2 = false;
                    // Use a dummy CValidationState so someone can't set up
                    // nodes to counter-DoS based on orphan resolution (that is,
                    // feeding people an invalid transaction based on LegitTxX
                    // in order to get anyone relaying LegitTxX banned)
                     CValidationState stateDummy;
 
                     if (setMisbehaving.count(fromPeer)) {
                         continue;
                     }
                     if (AcceptToMemoryPool(config, mempool, stateDummy,
                                            porphanTx, true, &fMissingInputs2,
                                            &lRemovedTxn)) {
                         LogPrint(BCLog::MEMPOOL, "   accepted orphan tx %s\n",
                                  orphanId.ToString());
                         RelayTransaction(orphanTx, connman);
                         for (size_t i = 0; i < orphanTx.vout.size(); i++) {
                             vWorkQueue.emplace_back(orphanId, i);
                         }
                         vEraseQueue.push_back(orphanId);
                     } else if (!fMissingInputs2) {
                         int nDos = 0;
                         if (stateDummy.IsInvalid(nDos) && nDos > 0) {
                             // Punish peer that gave us an invalid orphan tx
                             Misbehaving(fromPeer, nDos, "invalid-orphan-tx");
                             setMisbehaving.insert(fromPeer);
                             LogPrint(BCLog::MEMPOOL,
                                      "   invalid orphan tx %s\n",
                                      orphanId.ToString());
                         }
                         // Has inputs but not accepted to mempool
                         // Probably non-standard or insufficient fee/priority
                         LogPrint(BCLog::MEMPOOL, "   removed orphan tx %s\n",
                                  orphanId.ToString());
                         vEraseQueue.push_back(orphanId);
                         if (!stateDummy.CorruptionPossible()) {
                             // Do not use rejection cache for witness
                             // transactions or witness-stripped transactions, as
                             // they can have been malleated. See
                             // https://github.com/bitcoin/bitcoin/issues/8279
                             // for details.
                             assert(recentRejects);
                             recentRejects->insert(orphanId);
                         }
                     }
                     mempool.check(pcoinsTip);
                 }
             }
 
             for (uint256 hash : vEraseQueue) {
                 EraseOrphanTx(hash);
             }
         } else if (fMissingInputs) {
            // It may be the case that the orphan's parents have all been
            // rejected.
             bool fRejectedParents = false;
             for (const CTxIn &txin : tx.vin) {
                 if (recentRejects->contains(txin.prevout.hash)) {
                     fRejectedParents = true;
                     break;
                 }
             }
             if (!fRejectedParents) {
                 uint32_t nFetchFlags = GetFetchFlags(
                     pfrom, chainActive.Tip(), chainparams.GetConsensus());
                 for (const CTxIn &txin : tx.vin) {
                     CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
                     pfrom->AddInventoryKnown(_inv);
                     if (!AlreadyHave(_inv)) {
                         pfrom->AskFor(_inv);
                     }
                 }
                 AddOrphanTx(ptx, pfrom->GetId());
 
                 // DoS prevention: do not allow mapOrphanTransactions to grow
                 // unbounded
                 unsigned int nMaxOrphanTx = (unsigned int)std::max(
                     int64_t(0),
                     GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
                 unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
                 if (nEvicted > 0) {
                     LogPrint(BCLog::MEMPOOL,
                              "mapOrphan overflow, removed %u tx\n", nEvicted);
                 }
             } else {
                 LogPrint(BCLog::MEMPOOL,
                          "not keeping orphan with rejected parents %s\n",
                          tx.GetId().ToString());
                // We will continue to reject this tx since it has rejected
                // parents, so avoid re-requesting it from other peers.
                 recentRejects->insert(tx.GetId());
             }
         } else {
             if (!state.CorruptionPossible()) {
                 // Do not use rejection cache for witness transactions or
                 // witness-stripped transactions, as they can have been
                 // malleated. See https://github.com/bitcoin/bitcoin/issues/8279
                 // for details.
                 assert(recentRejects);
                 recentRejects->insert(tx.GetId());
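                // Keep modest-sized rejected transactions around, since they
                // may still be useful for reconstructing compact blocks that
                // include them.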
                 if (RecursiveDynamicUsage(*ptx) < 100000) {
                     AddToCompactExtraTransactions(ptx);
                 }
             }
 
             if (pfrom->fWhitelisted &&
                 GetBoolArg("-whitelistforcerelay",
                            DEFAULT_WHITELISTFORCERELAY)) {
                 // Always relay transactions received from whitelisted peers,
                 // even if they were already in the mempool or rejected from it
                 // due to policy, allowing the node to function as a gateway for
                 // nodes hidden behind it.
                 //
                 // Never relay transactions that we would assign a non-zero DoS
                 // score for, as we expect peers to do the same with us in that
                 // case.
                 int nDoS = 0;
                 if (!state.IsInvalid(nDoS) || nDoS == 0) {
                     LogPrintf("Force relaying tx %s from whitelisted peer=%d\n",
                               tx.GetId().ToString(), pfrom->id);
                     RelayTransaction(tx, connman);
                 } else {
                     LogPrintf("Not relaying invalid transaction %s from "
                               "whitelisted peer=%d (%s)\n",
                               tx.GetId().ToString(), pfrom->id,
                               FormatStateMessage(state));
                 }
             }
         }
 
         for (const CTransactionRef &removedTx : lRemovedTxn) {
             AddToCompactExtraTransactions(removedTx);
         }
 
         int nDoS = 0;
         if (state.IsInvalid(nDoS)) {
             LogPrint(
                 BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n",
                 tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state));
             // Never send AcceptToMemoryPool's internal codes over P2P.
             if (state.GetRejectCode() > 0 &&
                 state.GetRejectCode() < REJECT_INTERNAL) {
                 connman.PushMessage(
                     pfrom,
                     msgMaker.Make(NetMsgType::REJECT, strCommand,
                                   uint8_t(state.GetRejectCode()),
                                   state.GetRejectReason().substr(
                                       0, MAX_REJECT_MESSAGE_LENGTH),
                                   inv.hash));
             }
             if (nDoS > 0) {
                 Misbehaving(pfrom, nDoS, state.GetRejectReason());
             }
         }
     }
 
     // Ignore blocks received while importing
     else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) {
         CBlockHeaderAndShortTxIDs cmpctblock;
         vRecv >> cmpctblock;
 
         {
             LOCK(cs_main);
 
             if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) ==
                 mapBlockIndex.end()) {
                // Doesn't connect (or is genesis); instead of DoSing in
                // AcceptBlockHeader, request deeper headers.
                 if (!IsInitialBlockDownload()) {
                     connman.PushMessage(
                         pfrom,
                         msgMaker.Make(NetMsgType::GETHEADERS,
                                       chainActive.GetLocator(pindexBestHeader),
                                       uint256()));
                 }
                 return true;
             }
         }
 
         const CBlockIndex *pindex = nullptr;
         CValidationState state;
         if (!ProcessNewBlockHeaders(config, {cmpctblock.header}, state,
                                     &pindex)) {
             int nDoS;
             if (state.IsInvalid(nDoS)) {
                 if (nDoS > 0) {
                     LOCK(cs_main);
                     Misbehaving(pfrom, nDoS, state.GetRejectReason());
                 }
                 LogPrintf("Peer %d sent us invalid header via cmpctblock\n",
                           pfrom->id);
                 return true;
             }
         }
 
         // When we succeed in decoding a block's txids from a cmpctblock
         // message we typically jump to the BLOCKTXN handling code, with a
         // dummy (empty) BLOCKTXN message, to re-use the logic there in
         // completing processing of the putative block (without cs_main).
         bool fProcessBLOCKTXN = false;
         CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
 
        // If we end up treating this as a plain headers message, call that as
        // well without cs_main.
         bool fRevertToHeaderProcessing = false;
         CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION);
 
         // Keep a CBlock for "optimistic" compactblock reconstructions (see
         // below)
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockReconstructed = false;
 
         {
             LOCK(cs_main);
             // If AcceptBlockHeader returned true, it set pindex
             assert(pindex);
             UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
 
             std::map<uint256,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator blockInFlightIt =
                     mapBlocksInFlight.find(pindex->GetBlockHash());
             bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
 
             if (pindex->nStatus & BLOCK_HAVE_DATA) {
                 // Nothing to do here
                 return true;
             }
 
             if (pindex->nChainWork <=
                     chainActive.Tip()->nChainWork || // We know something better
                 pindex->nTx != 0) {
                 // We had this block at some point, but pruned it
                 if (fAlreadyInFlight) {
                     // We requested this block for some reason, but our mempool
                     // will probably be useless so we just grab the block via
                     // normal getdata.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(
                         MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev,
                                                   chainparams.GetConsensus()),
                         cmpctblock.header.GetHash());
                     connman.PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                 }
                 return true;
             }
 
             // If we're not close to tip yet, give up and let parallel block
             // fetch work its magic.
             if (!fAlreadyInFlight &&
                 !CanDirectFetch(chainparams.GetConsensus())) {
                 return true;
             }
 
             CNodeState *nodestate = State(pfrom->GetId());
 
             // We want to be a bit conservative just to be extra careful about
             // DoS possibilities in compact block processing...
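            // Only attempt direct reconstruction for blocks at most two ahead
            // of our active tip; anything further is either re-requested via
            // getdata (if already in flight) or treated as a plain headers
            // announcement below.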
             if (pindex->nHeight <= chainActive.Height() + 2) {
                 if ((!fAlreadyInFlight &&
                      nodestate->nBlocksInFlight <
                          MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                     (fAlreadyInFlight &&
                      blockInFlightIt->second.first == pfrom->GetId())) {
                     std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
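                      // MarkBlockAsInFlight returns false when the block was
                      // already marked in flight from this peer; in that case
                      // it points queuedBlockIt at the existing entry.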
                     if (!MarkBlockAsInFlight(config, pfrom->GetId(),
                                              pindex->GetBlockHash(),
                                              chainparams.GetConsensus(), pindex,
                                              &queuedBlockIt)) {
                         if (!(*queuedBlockIt)->partialBlock) {
                             (*queuedBlockIt)
                                 ->partialBlock.reset(
                                     new PartiallyDownloadedBlock(config,
                                                                  &mempool));
                         } else {
                             // The block was already in flight using compact
                             // blocks from the same peer.
                             LogPrint(BCLog::NET, "Peer sent us compact block "
                                                  "we were already syncing!\n");
                             return true;
                         }
                     }
 
                     PartiallyDownloadedBlock &partialBlock =
                         *(*queuedBlockIt)->partialBlock;
                     ReadStatus status =
                         partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status == READ_STATUS_INVALID) {
                         // Reset in-flight state in case of whitelist
                         MarkBlockAsReceived(pindex->GetBlockHash());
                         Misbehaving(pfrom, 100, "invalid-cmpctblk");
                         LogPrintf("Peer %d sent us invalid compact block\n",
                                   pfrom->id);
                         return true;
                     } else if (status == READ_STATUS_FAILED) {
                         // Duplicate txindexes, the block is now in-flight, so
                         // just request it.
                         std::vector<CInv> vInv(1);
                         vInv[0] =
                             CInv(MSG_BLOCK |
                                      GetFetchFlags(pfrom, pindex->pprev,
                                                    chainparams.GetConsensus()),
                                  cmpctblock.header.GetHash());
                         connman.PushMessage(
                             pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                         return true;
                     }
 
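                      // Ask only for the transactions we could not fill in
                      // from the mempool or the extra-transactions pool.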
                     BlockTransactionsRequest req;
                     for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                         if (!partialBlock.IsTxAvailable(i)) {
                             req.indexes.push_back(i);
                         }
                     }
                     if (req.indexes.empty()) {
                         // Dirty hack to jump to BLOCKTXN code (TODO: move
                         // message handling into their own functions)
                         BlockTransactions txn;
                         txn.blockhash = cmpctblock.header.GetHash();
                         blockTxnMsg << txn;
                         fProcessBLOCKTXN = true;
                     } else {
                         req.blockhash = pindex->GetBlockHash();
                         connman.PushMessage(
                             pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
                     }
                 } else {
                     // This block is either already in flight from a different
                     // peer, or this peer has too many blocks outstanding to
                     // download from. Optimistically try to reconstruct anyway
                     // since we might be able to without any round trips.
                     PartiallyDownloadedBlock tempBlock(config, &mempool);
                     ReadStatus status =
                         tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status != READ_STATUS_OK) {
                         // TODO: don't ignore failures
                         return true;
                     }
                     std::vector<CTransactionRef> dummy;
                     status = tempBlock.FillBlock(*pblock, dummy);
                     if (status == READ_STATUS_OK) {
                         fBlockReconstructed = true;
                     }
                 }
             } else {
                 if (fAlreadyInFlight) {
                    // We requested this block, but it's far into the future,
                    // so our mempool will probably be useless - request the
                    // block normally.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(
                         MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev,
                                                   chainparams.GetConsensus()),
                         cmpctblock.header.GetHash());
                     connman.PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                     return true;
                 } else {
                     // If this was an announce-cmpctblock, we want the same
                     // treatment as a header message. Dirty hack to process as
                     // if it were just a headers message (TODO: move message
                     // handling into their own functions)
                     std::vector<CBlock> headers;
                     headers.push_back(cmpctblock.header);
                     vHeadersMsg << headers;
                     fRevertToHeaderProcessing = true;
                 }
             }
         } // cs_main
 
         if (fProcessBLOCKTXN) {
             return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
                                   blockTxnMsg, nTimeReceived, chainparams,
                                   connman, interruptMsgProc);
         }
 
         if (fRevertToHeaderProcessing) {
             return ProcessMessage(config, pfrom, NetMsgType::HEADERS,
                                   vHeadersMsg, nTimeReceived, chainparams,
                                   connman, interruptMsgProc);
         }
 
         if (fBlockReconstructed) {
             // If we got here, we were able to optimistically reconstruct a
             // block that is in flight from some other peer.
             {
                 LOCK(cs_main);
                 mapBlockSource.emplace(pblock->GetHash(),
                                        std::make_pair(pfrom->GetId(), false));
             }
             bool fNewBlock = false;
             ProcessNewBlock(config, pblock, true, &fNewBlock);
             if (fNewBlock) {
                 pfrom->nLastBlockTime = GetTime();
             }
 
             // hold cs_main for CBlockIndex::IsValid()
             LOCK(cs_main);
             if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
                // Clear download state for this block, which is in process
                // from some other peer. We do this after calling
                // ProcessNewBlock so that a malleated cmpctblock announcement
                // can't be used to interfere with block relay.
                 MarkBlockAsReceived(pblock->GetHash());
             }
         }
 
     }
 
     else if (strCommand == NetMsgType::BLOCKTXN && !fImporting &&
              !fReindex) // Ignore blocks received while importing
     {
         BlockTransactions resp;
         vRecv >> resp;
 
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockRead = false;
         {
             LOCK(cs_main);
 
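            // Only accept blocktxn responses for a compact block that we
            // requested, and only from the peer we requested it from.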
             std::map<uint256,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator it = mapBlocksInFlight.find(resp.blockhash);
             if (it == mapBlocksInFlight.end() ||
                 !it->second.second->partialBlock ||
                 it->second.first != pfrom->GetId()) {
                 LogPrint(BCLog::NET,
                          "Peer %d sent us block transactions for block "
                          "we weren't expecting\n",
                          pfrom->id);
                 return true;
             }
 
             PartiallyDownloadedBlock &partialBlock =
                 *it->second.second->partialBlock;
             ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
             if (status == READ_STATUS_INVALID) {
                 // Reset in-flight state in case of whitelist.
                 MarkBlockAsReceived(resp.blockhash);
                 Misbehaving(pfrom, 100, "invalid-cmpctblk-txns");
                 LogPrintf("Peer %d sent us invalid compact block/non-matching "
                           "block transactions\n",
                           pfrom->id);
                 return true;
             } else if (status == READ_STATUS_FAILED) {
                 // Might have collided, fall back to getdata now :(
                 std::vector<CInv> invs;
                 invs.push_back(
                     CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(),
                                                    chainparams.GetConsensus()),
                          resp.blockhash));
                 connman.PushMessage(pfrom,
                                     msgMaker.Make(NetMsgType::GETDATA, invs));
             } else {
                 // Block is either okay, or possibly we received
                 // READ_STATUS_CHECKBLOCK_FAILED.
                 // Note that CheckBlock can only fail for one of a few reasons:
                 // 1. bad-proof-of-work (impossible here, because we've already
                 //    accepted the header)
                 // 2. merkleroot doesn't match the transactions given (already
                 //    caught in FillBlock with READ_STATUS_FAILED, so
                 //    impossible here)
                 // 3. the block is otherwise invalid (eg invalid coinbase,
                 //    block is too big, too many legacy sigops, etc).
                 // So if CheckBlock failed, #3 is the only possibility.
                 // Under BIP 152, we don't DoS-ban unless proof of work is
                 // invalid (we don't require all the stateless checks to have
                 // been run). This is handled below, so just treat this as
                 // though the block was successfully read, and rely on the
                 // handling in ProcessNewBlock to ensure the block index is
                 // updated, reject messages go out, etc.
 
                // MarkBlockAsReceived erases the in-flight entry, so
                // partialBlock must not be used after this call.
                 MarkBlockAsReceived(resp.blockhash);
                 fBlockRead = true;
                 // mapBlockSource is only used for sending reject messages and
                 // DoS scores, so the race between here and cs_main in
                 // ProcessNewBlock is fine. BIP 152 permits peers to relay
                 // compact blocks after validating the header only; we should
                 // not punish peers if the block turns out to be invalid.
                 mapBlockSource.emplace(resp.blockhash,
                                        std::make_pair(pfrom->GetId(), false));
             }
         } // Don't hold cs_main when we call into ProcessNewBlock
         if (fBlockRead) {
             bool fNewBlock = false;
             // Since we requested this block (it was in mapBlocksInFlight),
             // force it to be processed, even if it would not be a candidate for
             // new tip (missing previous block, chain not long enough, etc)
             ProcessNewBlock(config, pblock, true, &fNewBlock);
             if (fNewBlock) {
                 pfrom->nLastBlockTime = GetTime();
             }
         }
     }
 
     // Ignore headers received while importing
     else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) {
         std::vector<CBlockHeader> headers;
 
         // Bypass the normal CBlock deserialization, as we don't want to risk
         // deserializing 2000 full blocks.
         unsigned int nCount = ReadCompactSize(vRecv);
         if (nCount > MAX_HEADERS_RESULTS) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "too-many-headers");
             return error("headers message size = %u", nCount);
         }
         headers.resize(nCount);
         for (unsigned int n = 0; n < nCount; n++) {
             vRecv >> headers[n];
             // Ignore tx count; assume it is 0.
             ReadCompactSize(vRecv);
         }
 
         if (nCount == 0) {
            // Nothing interesting. Stop asking this peer for more headers.
             return true;
         }
 
         const CBlockIndex *pindexLast = nullptr;
         {
             LOCK(cs_main);
             CNodeState *nodestate = State(pfrom->GetId());
 
            // If this looks like it could be a block announcement (nCount <
            // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers
            // that don't connect:
            // - Send a getheaders message in response to try to connect the
            //   chain.
            // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
            //   don't connect before giving DoS points.
            // - Once a headers message is received that is valid and does
            //   connect, nUnconnectingHeaders gets reset back to 0.
             if (mapBlockIndex.find(headers[0].hashPrevBlock) ==
                     mapBlockIndex.end() &&
                 nCount < MAX_BLOCKS_TO_ANNOUNCE) {
                 nodestate->nUnconnectingHeaders++;
                 connman.PushMessage(
                     pfrom,
                     msgMaker.Make(NetMsgType::GETHEADERS,
                                   chainActive.GetLocator(pindexBestHeader),
                                   uint256()));
                 LogPrint(BCLog::NET, "received header %s: missing prev block "
                                      "%s, sending getheaders (%d) to end "
                                      "(peer=%d, nUnconnectingHeaders=%d)\n",
                          headers[0].GetHash().ToString(),
                          headers[0].hashPrevBlock.ToString(),
                          pindexBestHeader->nHeight, pfrom->id,
                          nodestate->nUnconnectingHeaders);
                 // Set hashLastUnknownBlock for this peer, so that if we
                 // eventually get the headers - even from a different peer -
                 // we can use this peer to download.
                 UpdateBlockAvailability(pfrom->GetId(),
                                         headers.back().GetHash());
 
                 if (nodestate->nUnconnectingHeaders %
                         MAX_UNCONNECTING_HEADERS ==
                     0) {
                     // The peer is sending us many headers we can't connect.
                     Misbehaving(pfrom, 20, "too-many-unconnected-headers");
                 }
                 return true;
             }
 
             uint256 hashLastBlock;
             for (const CBlockHeader &header : headers) {
                 if (!hashLastBlock.IsNull() &&
                     header.hashPrevBlock != hashLastBlock) {
                     Misbehaving(pfrom, 20, "disconnected-header");
                     return error("non-continuous headers sequence");
                 }
                 hashLastBlock = header.GetHash();
             }
         }
 
         CValidationState state;
         if (!ProcessNewBlockHeaders(config, headers, state, &pindexLast)) {
             int nDoS;
             if (state.IsInvalid(nDoS)) {
                 if (nDoS > 0) {
                     LOCK(cs_main);
                     Misbehaving(pfrom, nDoS, state.GetRejectReason());
                 }
                 return error("invalid header received");
             }
         }
 
         {
             LOCK(cs_main);
             CNodeState *nodestate = State(pfrom->GetId());
             if (nodestate->nUnconnectingHeaders > 0) {
                 LogPrint(BCLog::NET,
                          "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n",
                          pfrom->id, nodestate->nUnconnectingHeaders);
             }
             nodestate->nUnconnectingHeaders = 0;
 
             assert(pindexLast);
             UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
 
             if (nCount == MAX_HEADERS_RESULTS) {
                 // Headers message had its maximum size; the peer may have more
                 // headers.
                 // TODO: optimize: if pindexLast is an ancestor of
                 // chainActive.Tip or pindexBestHeader, continue from there
                 // instead.
                 LogPrint(
                     BCLog::NET,
                     "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
                     pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight);
                 connman.PushMessage(
                     pfrom,
                     msgMaker.Make(NetMsgType::GETHEADERS,
                                   chainActive.GetLocator(pindexLast),
                                   uint256()));
             }
 
             bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
             // If this set of headers is valid and ends in a block with at least
             // as much work as our tip, download as much as possible.
             if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) &&
                 chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
                 std::vector<const CBlockIndex *> vToFetch;
                 const CBlockIndex *pindexWalk = pindexLast;
                 // Calculate all the blocks we'd need to switch to pindexLast,
                 // up to a limit.
                 while (pindexWalk && !chainActive.Contains(pindexWalk) &&
                        vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                     if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
                         !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
                         // We don't have this block, and it's not yet in flight.
                         vToFetch.push_back(pindexWalk);
                     }
                     pindexWalk = pindexWalk->pprev;
                 }
                 // If pindexWalk still isn't on our main chain, we're looking at
                 // a very large reorg at a time we think we're close to caught
                 // up to the main chain -- this shouldn't really happen. Bail
                 // out on the direct fetch and rely on parallel download
                 // instead.
                 if (!chainActive.Contains(pindexWalk)) {
                     LogPrint(BCLog::NET,
                              "Large reorg, won't direct fetch to %s (%d)\n",
                              pindexLast->GetBlockHash().ToString(),
                              pindexLast->nHeight);
                 } else {
                     std::vector<CInv> vGetData;
                     // Download as much as possible, from earliest to latest.
                     for (const CBlockIndex *pindex :
                          boost::adaptors::reverse(vToFetch)) {
                         if (nodestate->nBlocksInFlight >=
                             MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                             // Can't download any more from this peer
                             break;
                         }
                         uint32_t nFetchFlags = GetFetchFlags(
                             pfrom, pindex->pprev, chainparams.GetConsensus());
                         vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags,
                                                 pindex->GetBlockHash()));
                         MarkBlockAsInFlight(config, pfrom->GetId(),
                                             pindex->GetBlockHash(),
                                             chainparams.GetConsensus(), pindex);
                         LogPrint(BCLog::NET,
                                  "Requesting block %s from peer=%d\n",
                                  pindex->GetBlockHash().ToString(), pfrom->id);
                     }
                     if (vGetData.size() > 1) {
                         LogPrint(BCLog::NET, "Downloading blocks toward %s "
                                              "(%d) via headers direct fetch\n",
                                  pindexLast->GetBlockHash().ToString(),
                                  pindexLast->nHeight);
                     }
                     if (vGetData.size() > 0) {
                         if (nodestate->fSupportsDesiredCmpctVersion &&
                             vGetData.size() == 1 &&
                             mapBlocksInFlight.size() == 1 &&
                             pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
                             // In any case, we want to download using a compact
                             // block, not a regular one.
                             vGetData[0] =
                                 CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                         }
                         connman.PushMessage(
                             pfrom,
                             msgMaker.Make(NetMsgType::GETDATA, vGetData));
                     }
                 }
             }
         }
     }
 
     else if (strCommand == NetMsgType::BLOCK && !fImporting &&
              !fReindex) // Ignore blocks received while importing
     {
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         vRecv >> *pblock;
 
         LogPrint(BCLog::NET, "received block %s peer=%d\n",
                  pblock->GetHash().ToString(), pfrom->id);
 
         // Process all blocks from whitelisted peers, even if not requested,
         // unless we're still syncing with the network. Such an unrequested
         // block may still be processed, subject to the conditions in
         // AcceptBlock().
         bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload();
         const uint256 hash(pblock->GetHash());
         {
             LOCK(cs_main);
             // Also always process if we requested the block explicitly, as we
             // may need it even though it is not a candidate for a new best tip.
             forceProcessing |= MarkBlockAsReceived(hash);
             // mapBlockSource is only used for sending reject messages and DoS
             // scores, so the race between here and cs_main in ProcessNewBlock
             // is fine.
             mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
         }
         bool fNewBlock = false;
         ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock);
         if (fNewBlock) {
             pfrom->nLastBlockTime = GetTime();
         }
     }
 
     else if (strCommand == NetMsgType::GETADDR) {
         // This asymmetric behavior for inbound and outbound connections was
         // introduced to prevent a fingerprinting attack: an attacker can send
         // specific fake addresses to users' AddrMan and later request them by
         // sending getaddr messages. Making nodes which are behind NAT and can
         // only make outgoing connections ignore the getaddr message mitigates
         // the attack.
         if (!pfrom->fInbound) {
             LogPrint(BCLog::NET,
                      "Ignoring \"getaddr\" from outbound connection. peer=%d\n",
                      pfrom->id);
             return true;
         }
 
         // Only send one GetAddr response per connection to reduce resource
         // waste and discourage addr stamping of INV announcements.
         if (pfrom->fSentAddr) {
             LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
                      pfrom->id);
             return true;
         }
         pfrom->fSentAddr = true;
 
         pfrom->vAddrToSend.clear();
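         // Queue a fresh random selection of addresses from AddrMan; they are
         // flushed to the peer by the next addr trickle in SendMessages.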
         std::vector<CAddress> vAddr = connman.GetAddresses();
         FastRandomContext insecure_rand;
         for (const CAddress &addr : vAddr) {
             pfrom->PushAddress(addr, insecure_rand);
         }
     }
 
     else if (strCommand == NetMsgType::MEMPOOL) {
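         // BIP 35: the peer requests our mempool contents. Flag it so the next
         // inventory trickle in SendMessages announces matching transactions.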
         if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted) {
             LogPrint(BCLog::NET, "mempool request with bloom filters disabled, "
                                  "disconnect peer=%d\n",
                      pfrom->GetId());
             pfrom->fDisconnect = true;
             return true;
         }
 
         if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted) {
             LogPrint(BCLog::NET, "mempool request with bandwidth limit "
                                  "reached, disconnect peer=%d\n",
                      pfrom->GetId());
             pfrom->fDisconnect = true;
             return true;
         }
 
         LOCK(pfrom->cs_inventory);
         pfrom->fSendMempool = true;
     }
 
     else if (strCommand == NetMsgType::PING) {
         if (pfrom->nVersion > BIP0031_VERSION) {
             uint64_t nonce = 0;
             vRecv >> nonce;
             // Echo the message back with the nonce. This allows for two useful
             // features:
             //
             // 1) A remote node can quickly check if the connection is
             // operational.
             // 2) Remote nodes can measure the latency of the network thread. If
             // this node is overloaded it won't respond to pings quickly and the
             // remote node can avoid sending us more work, like chain download
             // requests.
             //
             // The nonce stops the remote getting confused between different
             // pings: without it, if the remote node sends a ping once per
             // second and this node takes 5 seconds to respond to each, the 5th
             // ping the remote sends would appear to return very quickly.
             connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
         }
     }
 
     else if (strCommand == NetMsgType::PONG) {
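         // Match the nonce in the pong against our outstanding ping to measure
         // the round-trip time; problems are logged but not punished.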
         int64_t pingUsecEnd = nTimeReceived;
         uint64_t nonce = 0;
         size_t nAvail = vRecv.in_avail();
         bool bPingFinished = false;
         std::string sProblem;
 
         if (nAvail >= sizeof(nonce)) {
             vRecv >> nonce;
 
             // Only process pong message if there is an outstanding ping (old
             // ping without nonce should never pong)
             if (pfrom->nPingNonceSent != 0) {
                 if (nonce == pfrom->nPingNonceSent) {
                     // Matching pong received, this ping is no longer
                     // outstanding
                     bPingFinished = true;
                     int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
                     if (pingUsecTime > 0) {
                         // Successful ping time measurement, replace previous
                         pfrom->nPingUsecTime = pingUsecTime;
                         pfrom->nMinPingUsecTime = std::min(
                             pfrom->nMinPingUsecTime.load(), pingUsecTime);
                     } else {
                         // This should never happen
                         sProblem = "Timing mishap";
                     }
                 } else {
                     // Nonce mismatches are normal when pings are overlapping
                     sProblem = "Nonce mismatch";
                     if (nonce == 0) {
                         // This is most likely a bug in another implementation
                         // somewhere; cancel this ping
                         bPingFinished = true;
                         sProblem = "Nonce zero";
                     }
                 }
             } else {
                 sProblem = "Unsolicited pong without ping";
             }
         } else {
             // This is most likely a bug in another implementation somewhere;
             // cancel this ping
             bPingFinished = true;
             sProblem = "Short payload";
         }
 
         if (!(sProblem.empty())) {
             LogPrint(BCLog::NET,
                      "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                      pfrom->id, sProblem, pfrom->nPingNonceSent, nonce, nAvail);
         }
         if (bPingFinished) {
             pfrom->nPingNonceSent = 0;
         }
     }
 
     else if (strCommand == NetMsgType::FILTERLOAD) {
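         // BIP 37: install the peer's bloom filter; from now on only
         // transactions matching the filter are announced to it.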
         CBloomFilter filter;
         vRecv >> filter;
 
         if (!filter.IsWithinSizeConstraints()) {
             // There is no excuse for sending a too-large filter
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "oversized-bloom-filter");
         } else {
             LOCK(pfrom->cs_filter);
             delete pfrom->pfilter;
             pfrom->pfilter = new CBloomFilter(filter);
             pfrom->pfilter->UpdateEmptyFull();
             pfrom->fRelayTxes = true;
         }
     }
 
     else if (strCommand == NetMsgType::FILTERADD) {
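         // BIP 37: add a single data element to the peer's previously loaded
         // bloom filter.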
         std::vector<uint8_t> vData;
         vRecv >> vData;
 
         // Nodes must NEVER send a data item > 520 bytes (the max size for a
         // script data object, and thus, the maximum size any matched object can
         // have) in a filteradd message.
         bool bad = false;
         if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
             bad = true;
         } else {
             LOCK(pfrom->cs_filter);
             if (pfrom->pfilter) {
                 pfrom->pfilter->insert(vData);
             } else {
                 bad = true;
             }
         }
         if (bad) {
             LOCK(cs_main);
             // The structure of this code doesn't really allow for a good error
             // code. We'll go generic.
             Misbehaving(pfrom, 100, "invalid-filteradd");
         }
     }
 
     else if (strCommand == NetMsgType::FILTERCLEAR) {
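         // BIP 37: clear the peer's bloom filter so that all transactions are
         // relayed to it again.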
         LOCK(pfrom->cs_filter);
         if (pfrom->GetLocalServices() & NODE_BLOOM) {
             delete pfrom->pfilter;
             pfrom->pfilter = new CBloomFilter();
         }
         pfrom->fRelayTxes = true;
     }
 
     else if (strCommand == NetMsgType::FEEFILTER) {
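         // BIP 133: remember the minimum fee rate below which this peer does
         // not want transaction announcements.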
         Amount newFeeFilter(0);
         vRecv >> newFeeFilter;
         if (MoneyRange(newFeeFilter)) {
             {
                 LOCK(pfrom->cs_feeFilter);
                 pfrom->minFeeFilter = newFeeFilter;
             }
             LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
                      CFeeRate(newFeeFilter).ToString(), pfrom->id);
         }
     }
 
     else if (strCommand == NetMsgType::NOTFOUND) {
         // We do not care about the NOTFOUND message, but logging an Unknown
         // Command message would be undesirable as we transmit it ourselves.
     }
 
     else {
         // Ignore unknown commands for extensibility
         LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
                  SanitizeString(strCommand), pfrom->id);
     }
 
     return true;
 }
 
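 /**
  * Send any queued block reject messages to the peer and, if its misbehavior
  * score has flagged it for banning, act on that: whitelisted and addnode
  * peers only get a warning logged, local peers are disconnected but not
  * banned, and all other flagged peers are disconnected and banned. Returns
  * true if the peer had been flagged.
  */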
 static bool SendRejectsAndCheckIfBanned(CNode *pnode, CConnman &connman) {
     AssertLockHeld(cs_main);
     CNodeState &state = *State(pnode->GetId());
 
     for (const CBlockReject &reject : state.rejects) {
         connman.PushMessage(
             pnode,
             CNetMsgMaker(INIT_PROTO_VERSION)
                 .Make(NetMsgType::REJECT, (std::string)NetMsgType::BLOCK,
                       reject.chRejectCode, reject.strRejectReason,
                       reject.hashBlock));
     }
     state.rejects.clear();
 
     if (state.fShouldBan) {
         state.fShouldBan = false;
         if (pnode->fWhitelisted) {
             LogPrintf("Warning: not punishing whitelisted peer %s!\n",
                       pnode->addr.ToString());
         } else if (pnode->fAddnode) {
             LogPrintf("Warning: not punishing addnoded peer %s!\n",
                       pnode->addr.ToString());
         } else {
             pnode->fDisconnect = true;
             if (pnode->addr.IsLocal()) {
                 LogPrintf("Warning: not banning local peer %s!\n",
                           pnode->addr.ToString());
             } else {
                 connman.Ban(pnode->addr, BanReasonNodeMisbehaving);
             }
         }
         return true;
     }
     return false;
 }
 
 bool ProcessMessages(const Config &config, CNode *pfrom, CConnman &connman,
                      const std::atomic<bool> &interruptMsgProc) {
     const CChainParams &chainparams = Params();
     //
     // Message format
     //  (4) message start
     //  (12) command
     //  (4) size
     //  (4) checksum
     //  (x) data
     //
     bool fMoreWork = false;
 
     if (!pfrom->vRecvGetData.empty()) {
         ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman,
                        interruptMsgProc);
     }
 
     if (pfrom->fDisconnect) {
         return false;
     }
 
     // this maintains the order of responses
     if (!pfrom->vRecvGetData.empty()) {
         return true;
     }
 
     // Don't bother if send buffer is too full to respond anyway
     if (pfrom->fPauseSend) {
         return false;
     }
 
     std::list<CNetMessage> msgs;
     {
         LOCK(pfrom->cs_vProcessMsg);
         if (pfrom->vProcessMsg.empty()) {
             return false;
         }
         // Just take one message
         msgs.splice(msgs.begin(), pfrom->vProcessMsg,
                     pfrom->vProcessMsg.begin());
         pfrom->nProcessQueueSize -=
             msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
         pfrom->fPauseRecv =
             pfrom->nProcessQueueSize > connman.GetReceiveFloodSize();
         fMoreWork = !pfrom->vProcessMsg.empty();
     }
     CNetMessage &msg(msgs.front());
 
     msg.SetVersion(pfrom->GetRecvVersion());
 
     // Scan for message start
     if (memcmp(std::begin(msg.hdr.pchMessageStart),
                std::begin(chainparams.NetMagic()),
                CMessageHeader::MESSAGE_START_SIZE) != 0) {
         LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
                   SanitizeString(msg.hdr.GetCommand()), pfrom->id);
         pfrom->fDisconnect = true;
         return false;
     }
 
     // Read header
     CMessageHeader &hdr = msg.hdr;
     if (!hdr.IsValid(chainparams.NetMagic())) {
         LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
                   SanitizeString(hdr.GetCommand()), pfrom->id);
         return fMoreWork;
     }
     std::string strCommand = hdr.GetCommand();
 
     // Message size
     unsigned int nMessageSize = hdr.nMessageSize;
 
     // Checksum
     CDataStream &vRecv = msg.vRecv;
     const uint256 &hash = msg.GetMessageHash();
     if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) !=
         0) {
         LogPrintf(
             "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
             SanitizeString(strCommand), nMessageSize,
             HexStr(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE),
             HexStr(hdr.pchChecksum,
                    hdr.pchChecksum + CMessageHeader::CHECKSUM_SIZE));
         return fMoreWork;
     }
 
     // Process message
     bool fRet = false;
     try {
         fRet = ProcessMessage(config, pfrom, strCommand, vRecv, msg.nTime,
                               chainparams, connman, interruptMsgProc);
         if (interruptMsgProc) {
             return false;
         }
         if (!pfrom->vRecvGetData.empty()) {
             fMoreWork = true;
         }
     } catch (const std::ios_base::failure &e) {
         connman.PushMessage(pfrom,
                             CNetMsgMaker(INIT_PROTO_VERSION)
                                 .Make(NetMsgType::REJECT, strCommand,
                                       REJECT_MALFORMED,
                                       std::string("error parsing message")));
         if (strstr(e.what(), "end of data")) {
             // Allow exceptions from under-length message on vRecv
             LogPrintf(
                 "%s(%s, %u bytes): Exception '%s' caught, normally caused by a "
                 "message being shorter than its stated length\n",
                 __func__, SanitizeString(strCommand), nMessageSize, e.what());
         } else if (strstr(e.what(), "size too large")) {
             // Allow exceptions from over-long size
             LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__,
                       SanitizeString(strCommand), nMessageSize, e.what());
         } else if (strstr(e.what(), "non-canonical ReadCompactSize()")) {
             // Allow exceptions from non-canonical encoding
             LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__,
                       SanitizeString(strCommand), nMessageSize, e.what());
         } else {
             PrintExceptionContinue(&e, "ProcessMessages()");
         }
     } catch (const std::exception &e) {
         PrintExceptionContinue(&e, "ProcessMessages()");
     } catch (...) {
         PrintExceptionContinue(nullptr, "ProcessMessages()");
     }
 
     if (!fRet) {
         LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__,
                   SanitizeString(strCommand), nMessageSize, pfrom->id);
     }
 
     LOCK(cs_main);
     SendRejectsAndCheckIfBanned(pfrom, connman);
 
     return fMoreWork;
 }
 
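 /**
  * Heap comparator used when selecting transaction INVs to send: orders
  * iterators into a peer's setInventoryTxToSend via
  * CTxMemPool::CompareDepthAndScore so that the most attractive entries are
  * popped from the heap first.
  */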
 class CompareInvMempoolOrder {
     CTxMemPool *mp;
 
 public:
     CompareInvMempoolOrder(CTxMemPool *_mempool) { mp = _mempool; }
 
     bool operator()(std::set<uint256>::iterator a,
                     std::set<uint256>::iterator b) {
         /* As std::make_heap produces a max-heap, we want the entries with the
          * fewest ancestors/highest fee to sort later. */
         return mp->CompareDepthAndScore(*b, *a);
     }
 };
 
 bool SendMessages(const Config &config, CNode *pto, CConnman &connman,
                   const std::atomic<bool> &interruptMsgProc) {
     const Consensus::Params &consensusParams = Params().GetConsensus();
 
     // Don't send anything until the version handshake is complete
     if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
         return true;
     }
 
     // If we get here, the outgoing message serialization version is set and
     // can't change.
     const CNetMsgMaker msgMaker(pto->GetSendVersion());
 
     //
     // Message: ping
     //
     bool pingSend = false;
     if (pto->fPingQueued) {
         // RPC ping request by user
         pingSend = true;
     }
     if (pto->nPingNonceSent == 0 &&
         pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
         // Ping automatically sent as a latency probe & keepalive.
         pingSend = true;
     }
     if (pingSend) {
         uint64_t nonce = 0;
         while (nonce == 0) {
             GetRandBytes((uint8_t *)&nonce, sizeof(nonce));
         }
         pto->fPingQueued = false;
         pto->nPingUsecStart = GetTimeMicros();
         if (pto->nVersion > BIP0031_VERSION) {
             pto->nPingNonceSent = nonce;
             connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
         } else {
             // Peer is too old to support ping command with nonce, pong will
             // never arrive.
             pto->nPingNonceSent = 0;
             connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING));
         }
     }
 
     // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
     TRY_LOCK(cs_main, lockMain);
     if (!lockMain) {
         return true;
     }
 
     if (SendRejectsAndCheckIfBanned(pto, connman)) {
         return true;
     }
     CNodeState &state = *State(pto->GetId());
 
     // Address refresh broadcast
     int64_t nNow = GetTimeMicros();
     if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
         AdvertiseLocal(pto);
         pto->nNextLocalAddrSend =
             PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
     }
 
     //
     // Message: addr
     //
     if (pto->nNextAddrSend < nNow) {
         pto->nNextAddrSend =
             PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
         std::vector<CAddress> vAddr;
         vAddr.reserve(pto->vAddrToSend.size());
         for (const CAddress &addr : pto->vAddrToSend) {
             if (!pto->addrKnown.contains(addr.GetKey())) {
                 pto->addrKnown.insert(addr.GetKey());
                 vAddr.push_back(addr);
                 // receiver rejects addr messages larger than 1000
                 if (vAddr.size() >= 1000) {
                     connman.PushMessage(pto,
                                         msgMaker.Make(NetMsgType::ADDR, vAddr));
                     vAddr.clear();
                 }
             }
         }
         pto->vAddrToSend.clear();
         if (!vAddr.empty()) {
             connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
         }
 
         // we only send the big addr message once
         if (pto->vAddrToSend.capacity() > 40) {
             pto->vAddrToSend.shrink_to_fit();
         }
     }
 
     // Start block sync
     if (pindexBestHeader == nullptr) {
         pindexBestHeader = chainActive.Tip();
     }
 
     // Download if this is a nice peer, or we have no nice peers and this one
     // might do.
     bool fFetch = state.fPreferredDownload ||
                   (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot);
 
     if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
         // Only actively request headers from a single peer, unless we're close
         // to today.
         if ((nSyncStarted == 0 && fFetch) ||
             pindexBestHeader->GetBlockTime() >
                 GetAdjustedTime() - 24 * 60 * 60) {
             state.fSyncStarted = true;
             nSyncStarted++;
             const CBlockIndex *pindexStart = pindexBestHeader;
             /**
              * If possible, start at the block preceding the currently best
              * known header. This ensures that we always get a non-empty list of
              * headers back as long as the peer is up-to-date. With a non-empty
              * response, we can initialise the peer's known best block. This
              * wouldn't be possible if we requested starting at pindexBestHeader
              * and got back an empty response.
              */
             if (pindexStart->pprev) {
                 pindexStart = pindexStart->pprev;
             }
 
             LogPrint(BCLog::NET,
                      "initial getheaders (%d) to peer=%d (startheight:%d)\n",
                      pindexStart->nHeight, pto->id, pto->nStartingHeight);
             connman.PushMessage(
                 pto,
                 msgMaker.Make(NetMsgType::GETHEADERS,
                               chainActive.GetLocator(pindexStart), uint256()));
         }
     }
 
     // Resend wallet transactions that haven't gotten in a block yet
     // Except during reindex, importing and IBD, when old wallet transactions
     // become unconfirmed and spam other nodes.
     if (!fReindex && !fImporting && !IsInitialBlockDownload()) {
         GetMainSignals().Broadcast(nTimeBestReceived, &connman);
     }
 
     //
     // Try sending block announcements via headers
     //
     {
         // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block
         // hashes we're relaying, and our peer wants headers announcements, then
         // find the first header not yet known to our peer that would connect,
         // and send. If no header would connect, or if we have too many blocks,
         // or if the peer doesn't want headers, just add all to the inv queue.
         LOCK(pto->cs_inventory);
         std::vector<CBlock> vHeaders;
         bool fRevertToInv =
             ((!state.fPreferHeaders &&
               (!state.fPreferHeaderAndIDs ||
                pto->vBlockHashesToAnnounce.size() > 1)) ||
              pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
         // last header queued for delivery
         const CBlockIndex *pBestIndex = nullptr;
         // ensure pindexBestKnownBlock is up-to-date
         ProcessBlockAvailability(pto->id);
 
         if (!fRevertToInv) {
             bool fFoundStartingHeader = false;
             // Try to find the first header that our peer doesn't have, and
             // then send all headers past that one. If we come across any
             // headers that aren't on chainActive, give up.
             for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
                 BlockMap::iterator mi = mapBlockIndex.find(hash);
                 assert(mi != mapBlockIndex.end());
                 const CBlockIndex *pindex = mi->second;
                 if (chainActive[pindex->nHeight] != pindex) {
                     // Bail out if we reorged away from this block
                     fRevertToInv = true;
                     break;
                 }
                 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
                     // This means that the list of blocks to announce doesn't
                     // connect to each other. This shouldn't really be possible
                     // to hit during regular operation (because reorgs should
                     // take us to a chain that has some block not on the prior
                     // chain, which should be caught by the prior check), but
                     // one way this could happen is by using invalidateblock /
                     // reconsiderblock repeatedly on the tip, causing it to be
                     // added multiple times to vBlockHashesToAnnounce. Robustly
                     // deal with this rare situation by reverting to an inv.
                     fRevertToInv = true;
                     break;
                 }
                 pBestIndex = pindex;
                 if (fFoundStartingHeader) {
                     // add this to the headers message
                     vHeaders.push_back(pindex->GetBlockHeader());
                 } else if (PeerHasHeader(&state, pindex)) {
                     // Keep looking for the first new block.
                     continue;
                 } else if (pindex->pprev == nullptr ||
                            PeerHasHeader(&state, pindex->pprev)) {
                     // Peer doesn't have this header but they do have the prior
                     // one.
                     // Start sending headers.
                     fFoundStartingHeader = true;
                     vHeaders.push_back(pindex->GetBlockHeader());
                 } else {
                     // Peer doesn't have this header or the prior one --
                     // nothing will connect, so bail out.
                     fRevertToInv = true;
                     break;
                 }
             }
         }
         if (!fRevertToInv && !vHeaders.empty()) {
             if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
                 // We only send up to 1 block as header-and-ids, as otherwise
                 // it probably means we're doing an initial-ish sync or they're
                 // slow.
                 LogPrint(BCLog::NET,
                          "%s sending header-and-ids %s to peer=%d\n", __func__,
                          vHeaders.front().GetHash().ToString(), pto->id);
 
                 int nSendFlags = 0;
 
                 bool fGotBlockFromCache = false;
                 {
                     LOCK(cs_most_recent_block);
                     if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
                         CBlockHeaderAndShortTxIDs cmpctblock(
                             *most_recent_block);
                         connman.PushMessage(
                             pto,
                             msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                           cmpctblock));
                         fGotBlockFromCache = true;
                     }
                 }
                 if (!fGotBlockFromCache) {
                     CBlock block;
                     bool ret = ReadBlockFromDisk(block, pBestIndex, config);
                     assert(ret);
                     CBlockHeaderAndShortTxIDs cmpctblock(block);
                     connman.PushMessage(pto,
                                         msgMaker.Make(nSendFlags,
                                                       NetMsgType::CMPCTBLOCK,
                                                       cmpctblock));
                 }
                 state.pindexBestHeaderSent = pBestIndex;
             } else if (state.fPreferHeaders) {
                 if (vHeaders.size() > 1) {
                     LogPrint(BCLog::NET,
                              "%s: %u headers, range (%s, %s), to peer=%d\n",
                              __func__, vHeaders.size(),
                              vHeaders.front().GetHash().ToString(),
                              vHeaders.back().GetHash().ToString(), pto->id);
                 } else {
                     LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n",
                              __func__, vHeaders.front().GetHash().ToString(),
                              pto->id);
                 }
                 connman.PushMessage(
                     pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
                 state.pindexBestHeaderSent = pBestIndex;
             } else {
                 fRevertToInv = true;
             }
         }
         if (fRevertToInv) {
             // If falling back to using an inv, just try to inv the tip. The
             // last entry in vBlockHashesToAnnounce was our tip at some point in
             // the past.
             if (!pto->vBlockHashesToAnnounce.empty()) {
                 const uint256 &hashToAnnounce =
                     pto->vBlockHashesToAnnounce.back();
                 BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
                 assert(mi != mapBlockIndex.end());
                 const CBlockIndex *pindex = mi->second;
 
                 // Warn if we're announcing a block that is not on the main
                 // chain. This should be very rare and could be optimized out.
                 // Just log for now.
                 if (chainActive[pindex->nHeight] != pindex) {
                     LogPrint(BCLog::NET,
                              "Announcing block %s not on main chain (tip=%s)\n",
                              hashToAnnounce.ToString(),
                              chainActive.Tip()->GetBlockHash().ToString());
                 }
 
                 // If the peer's chain has this block, don't inv it back.
                 if (!PeerHasHeader(&state, pindex)) {
                     pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
                     LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n",
                              __func__, pto->id, hashToAnnounce.ToString());
                 }
             }
         }
         pto->vBlockHashesToAnnounce.clear();
     }
 
     //
     // Message: inventory
     //
     std::vector<CInv> vInv;
     {
         LOCK(pto->cs_inventory);
         vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(),
                                       INVENTORY_BROADCAST_MAX));
 
         // Add blocks
         for (const uint256 &hash : pto->vInventoryBlockToSend) {
             vInv.push_back(CInv(MSG_BLOCK, hash));
             if (vInv.size() == MAX_INV_SZ) {
                 connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
                 vInv.clear();
             }
         }
         pto->vInventoryBlockToSend.clear();
 
         // Check whether periodic sends should happen
         bool fSendTrickle = pto->fWhitelisted;
         if (pto->nNextInvSend < nNow) {
             fSendTrickle = true;
             // Use half the delay for outbound peers, as there is less privacy
             // concern for them.
             pto->nNextInvSend = PoissonNextSend(
                 nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
         }
 
         // Time to send but the peer has requested we not relay transactions.
         if (fSendTrickle) {
             LOCK(pto->cs_filter);
             if (!pto->fRelayTxes) {
                 pto->setInventoryTxToSend.clear();
             }
         }
 
         // Respond to BIP35 mempool requests
         if (fSendTrickle && pto->fSendMempool) {
             auto vtxinfo = mempool.infoAll();
             pto->fSendMempool = false;
             Amount filterrate(0);
             {
                 LOCK(pto->cs_feeFilter);
                 filterrate = pto->minFeeFilter;
             }
 
             LOCK(pto->cs_filter);
 
             for (const auto &txinfo : vtxinfo) {
                 const uint256 &txid = txinfo.tx->GetId();
                 CInv inv(MSG_TX, txid);
                 pto->setInventoryTxToSend.erase(txid);
                 if (filterrate != Amount(0)) {
                     if (txinfo.feeRate.GetFeePerK() < filterrate) {
                         continue;
                     }
                 }
                 if (pto->pfilter) {
                     if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) {
                         continue;
                     }
                 }
                 pto->filterInventoryKnown.insert(txid);
                 vInv.push_back(inv);
                 if (vInv.size() == MAX_INV_SZ) {
                     connman.PushMessage(pto,
                                         msgMaker.Make(NetMsgType::INV, vInv));
                     vInv.clear();
                 }
             }
             pto->timeLastMempoolReq = GetTime();
         }
 
         // Determine transactions to relay
         if (fSendTrickle) {
             // Produce a vector with all candidates for sending
             std::vector<std::set<uint256>::iterator> vInvTx;
             vInvTx.reserve(pto->setInventoryTxToSend.size());
             for (std::set<uint256>::iterator it =
                      pto->setInventoryTxToSend.begin();
                  it != pto->setInventoryTxToSend.end(); it++) {
                 vInvTx.push_back(it);
             }
             Amount filterrate(0);
             {
                 LOCK(pto->cs_feeFilter);
                 filterrate = pto->minFeeFilter;
             }
             // Topologically and fee-rate sort the inventory we send for privacy
             // and priority reasons. A heap is used so that not all items need
             // sorting if only a few are being sent.
             CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
             std::make_heap(vInvTx.begin(), vInvTx.end(),
                            compareInvMempoolOrder);
             // No reason to drain out at many times the network's capacity,
             // especially since we have many peers and some will draw much
             // shorter delays.
             unsigned int nRelayedTransactions = 0;
             LOCK(pto->cs_filter);
             while (!vInvTx.empty() &&
                    nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
                 // Fetch the top element from the heap
                 std::pop_heap(vInvTx.begin(), vInvTx.end(),
                               compareInvMempoolOrder);
                 std::set<uint256>::iterator it = vInvTx.back();
                 vInvTx.pop_back();
                 uint256 hash = *it;
                 // Remove it from the to-be-sent set
                 pto->setInventoryTxToSend.erase(it);
                 // Check if not in the filter already
                 if (pto->filterInventoryKnown.contains(hash)) {
                     continue;
                 }
                 // Not in the mempool anymore? don't bother sending it.
                 auto txinfo = mempool.info(hash);
                 if (!txinfo.tx) {
                     continue;
                 }
                 if (filterrate != Amount(0) &&
                     txinfo.feeRate.GetFeePerK() < filterrate) {
                     continue;
                 }
                 if (pto->pfilter &&
                     !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) {
                     continue;
                 }
                 // Send
                 vInv.push_back(CInv(MSG_TX, hash));
                 nRelayedTransactions++;
                 {
                     // Expire old relay messages
                     while (!vRelayExpiration.empty() &&
                            vRelayExpiration.front().first < nNow) {
                         mapRelay.erase(vRelayExpiration.front().second);
                         vRelayExpiration.pop_front();
                     }
 
                     auto ret = mapRelay.insert(
                         std::make_pair(hash, std::move(txinfo.tx)));
                     if (ret.second) {
                         vRelayExpiration.push_back(std::make_pair(
                             nNow + 15 * 60 * 1000000, ret.first));
                     }
                 }
                 if (vInv.size() == MAX_INV_SZ) {
                     connman.PushMessage(pto,
                                         msgMaker.Make(NetMsgType::INV, vInv));
                     vInv.clear();
                 }
                 pto->filterInventoryKnown.insert(hash);
             }
         }
     }
     if (!vInv.empty()) {
         connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
     }
 
     // Detect whether we're stalling
     nNow = GetTimeMicros();
     if (state.nStallingSince &&
         state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
         // Stalling only triggers when the block download window cannot move.
         // During normal steady state, the download window should be much larger
         // than the to-be-downloaded set of blocks, so disconnection should only
         // happen during initial block download.
         LogPrintf("Peer=%d is stalling block download, disconnecting\n",
                   pto->id);
         pto->fDisconnect = true;
         return true;
     }
     // In case there is a block that has been in flight from this peer for 2 +
     // 0.5 * N times the block interval (with N the number of peers from which
     // we're downloading validated blocks), disconnect due to timeout. We
     // compensate for other peers to prevent killing off peers due to our own
     // downstream link being saturated. We only count validated in-flight blocks
     // so peers can't advertise non-existing block hashes to unreasonably
     // increase our timeout.
     if (state.vBlocksInFlight.size() > 0) {
         QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
         int nOtherPeersWithValidatedDownloads =
             nPeersWithValidatedDownloads -
             (state.nBlocksInFlightValidHeaders > 0);
         if (nNow > state.nDownloadingSince +
                        consensusParams.nPowTargetSpacing *
                            (BLOCK_DOWNLOAD_TIMEOUT_BASE +
                             BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
                                 nOtherPeersWithValidatedDownloads)) {
             LogPrintf("Timeout downloading block %s from peer=%d, "
                       "disconnecting\n",
                       queuedBlock.hash.ToString(), pto->id);
             pto->fDisconnect = true;
             return true;
         }
     }
 
     //
     // Message: getdata (blocks)
     //
     std::vector<CInv> vGetData;
     if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) &&
         state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
         std::vector<const CBlockIndex *> vToDownload;
         NodeId staller = -1;
         FindNextBlocksToDownload(pto->GetId(),
                                  MAX_BLOCKS_IN_TRANSIT_PER_PEER -
                                      state.nBlocksInFlight,
                                  vToDownload, staller, consensusParams);
         for (const CBlockIndex *pindex : vToDownload) {
             uint32_t nFetchFlags =
                 GetFetchFlags(pto, pindex->pprev, consensusParams);
             vGetData.push_back(
                 CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
             MarkBlockAsInFlight(config, pto->GetId(), pindex->GetBlockHash(),
                                 consensusParams, pindex);
             LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
                      pindex->GetBlockHash().ToString(), pindex->nHeight,
                      pto->id);
         }
         if (state.nBlocksInFlight == 0 && staller != -1) {
             if (State(staller)->nStallingSince == 0) {
                 State(staller)->nStallingSince = nNow;
                 LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
             }
         }
     }
 
     //
     // Message: getdata (non-blocks)
     //
     while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow) {
         const CInv &inv = (*pto->mapAskFor.begin()).second;
         if (!AlreadyHave(inv)) {
             LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(),
                      pto->id);
             vGetData.push_back(inv);
             if (vGetData.size() >= 1000) {
                 connman.PushMessage(
                     pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                 vGetData.clear();
             }
         } else {
             // If we're not going to ask, don't expect a response.
             pto->setAskFor.erase(inv.hash);
         }
         pto->mapAskFor.erase(pto->mapAskFor.begin());
     }
     if (!vGetData.empty()) {
         connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
     }
 
     //
     // Message: feefilter
     //
     // We don't want white listed peers to filter txs to us if we have
     // -whitelistforcerelay
     if (pto->nVersion >= FEEFILTER_VERSION &&
         GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
         !(pto->fWhitelisted &&
           GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
         Amount currentFilter =
             mempool
                 .GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
                            1000000)
                 .GetFeePerK();
         int64_t timeNow = GetTimeMicros();
         if (timeNow > pto->nextSendTimeFeeFilter) {
             static CFeeRate default_feerate =
                 CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
             static FeeFilterRounder filterRounder(default_feerate);
             Amount filterToSend = filterRounder.round(currentFilter);
             // If we don't allow free transactions, then we always have a fee
             // filter of at least minRelayTxFee
             if (GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) <= 0) {
                 filterToSend =
                     std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
             }
 
             if (filterToSend != pto->lastSentFeeFilter) {
                 connman.PushMessage(
                     pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
                 pto->lastSentFeeFilter = filterToSend;
             }
             pto->nextSendTimeFeeFilter =
                 PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
         }
         // If the fee filter has changed substantially and it's still more than
         // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
         // broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
         else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 <
                      pto->nextSendTimeFeeFilter &&
                  (currentFilter < 3 * pto->lastSentFeeFilter / 4 ||
                   currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
             pto->nextSendTimeFeeFilter =
                 timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
         }
     }
     return true;
 }
 
 class CNetProcessingCleanup {
 public:
     CNetProcessingCleanup() {}
     ~CNetProcessingCleanup() {
         // orphan transactions
         mapOrphanTransactions.clear();
         mapOrphanTransactionsByPrev.clear();
     }
 } instance_of_cnetprocessingcleanup;
diff --git a/src/net_processing.h b/src/net_processing.h
index e7a220c4b..85a5b361e 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -1,74 +1,76 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_NET_PROCESSING_H
 #define BITCOIN_NET_PROCESSING_H
 
 #include "net.h"
 #include "validationinterface.h"
 
 class Config;
 
 /** Default for -maxorphantx, maximum number of orphan transactions kept in
  * memory */
 static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100;
 /** Expiration time for orphan transactions in seconds */
 static const int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
 /** Minimum time between orphan transactions expire time checks in seconds */
 static const int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
 /** Default number of orphan+recently-replaced txn to keep around for block
  * reconstruction */
 static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN = 100;
 
 /** Register with a network node to receive its signals */
 void RegisterNodeSignals(CNodeSignals &nodeSignals);
 /** Unregister a network node */
 void UnregisterNodeSignals(CNodeSignals &nodeSignals);
 
 class PeerLogicValidation : public CValidationInterface {
 private:
     CConnman *connman;
 
 public:
     PeerLogicValidation(CConnman *connmanIn);
 
-    void SyncTransaction(const CTransaction &tx, const CBlockIndex *pindex,
-                         int nPosInBlock) override;
+    void
+    BlockConnected(const std::shared_ptr<const CBlock> &pblock,
+                   const CBlockIndex *pindexConnected,
+                   const std::vector<CTransactionRef> &vtxConflicted) override;
     void UpdatedBlockTip(const CBlockIndex *pindexNew,
                          const CBlockIndex *pindexFork,
                          bool fInitialDownload) override;
     void BlockChecked(const CBlock &block,
                       const CValidationState &state) override;
     void NewPoWValidBlock(const CBlockIndex *pindex,
                           const std::shared_ptr<const CBlock> &pblock) override;
 };
 
 struct CNodeStateStats {
     int nMisbehavior;
     int nSyncHeight;
     int nCommonHeight;
     std::vector<int> vHeightInFlight;
 };
 
 /** Get statistics from node state */
 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats);
 /** Increase a node's misbehavior score. */
 void Misbehaving(NodeId nodeid, int howmuch, const std::string &reason);
 
 /** Process protocol messages received from a given node */
 bool ProcessMessages(const Config &config, CNode *pfrom, CConnman &connman,
                      const std::atomic<bool> &interrupt);
 /**
  * Send queued protocol messages to be sent to a given node.
  *
  * @param[in]   pto             The node which we are sending messages to.
  * @param[in]   connman         The connection manager for that node.
  * @param[in]   interrupt       Interrupt condition for processing threads
  * @return                      True if there is more work to be done
  */
 bool SendMessages(const Config &config, CNode *pto, CConnman &connman,
                   const std::atomic<bool> &interrupt);
 
 #endif // BITCOIN_NET_PROCESSING_H
diff --git a/src/validation.cpp b/src/validation.cpp
index a43c8d46e..0fa95f18e 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1,5086 +1,5087 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Copyright (c) 2017 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include "validation.h"
 
 #include "arith_uint256.h"
 #include "chainparams.h"
 #include "checkpoints.h"
 #include "checkqueue.h"
 #include "config.h"
 #include "consensus/consensus.h"
 #include "consensus/merkle.h"
 #include "consensus/validation.h"
 #include "fs.h"
 #include "hash.h"
 #include "init.h"
 #include "policy/fees.h"
 #include "policy/policy.h"
 #include "pow.h"
 #include "primitives/block.h"
 #include "primitives/transaction.h"
 #include "random.h"
 #include "script/script.h"
 #include "script/scriptcache.h"
 #include "script/sigcache.h"
 #include "script/standard.h"
 #include "timedata.h"
 #include "tinyformat.h"
 #include "txdb.h"
 #include "txmempool.h"
 #include "ui_interface.h"
 #include "undo.h"
 #include "util.h"
 #include "utilmoneystr.h"
 #include "utilstrencodings.h"
 #include "validationinterface.h"
 #include "versionbits.h"
 #include "warnings.h"
 
 #include <atomic>
 #include <sstream>
 
 #include <boost/algorithm/string/join.hpp>
 #include <boost/algorithm/string/replace.hpp>
 #include <boost/filesystem/fstream.hpp>
 #include <boost/math/distributions/poisson.hpp>
 #include <boost/range/adaptor/reversed.hpp>
 #include <boost/thread.hpp>
 
 #if defined(NDEBUG)
 #error "Bitcoin cannot be compiled without assertions."
 #endif
 
 /**
  * Global state
  */
 
 CCriticalSection cs_main;
 
 BlockMap mapBlockIndex;
 CChain chainActive;
 CBlockIndex *pindexBestHeader = nullptr;
 CWaitableCriticalSection csBestBlock;
 CConditionVariable cvBlockChange;
 int nScriptCheckThreads = 0;
 std::atomic_bool fImporting(false);
 bool fReindex = false;
 bool fTxIndex = false;
 bool fHavePruned = false;
 bool fPruneMode = false;
 bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
 bool fRequireStandard = true;
 bool fCheckBlockIndex = false;
 bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
 size_t nCoinCacheUsage = 5000 * 300;
 uint64_t nPruneTarget = 0;
 int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
 
 uint256 hashAssumeValid;
 
 CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
 Amount maxTxFee = DEFAULT_TRANSACTION_MAXFEE;
 
 CTxMemPool mempool(::minRelayTxFee);
 
 static void CheckBlockIndex(const Consensus::Params &consensusParams);
 
 /** Constant stuff for coinbase transactions we create: */
 CScript COINBASE_FLAGS;
 
 const std::string strMessageMagic = "Bitcoin Signed Message:\n";
 
 // Internal stuff
 namespace {
 
 struct CBlockIndexWorkComparator {
     bool operator()(CBlockIndex *pa, CBlockIndex *pb) const {
         // First sort by most total work, ...
         if (pa->nChainWork > pb->nChainWork) return false;
         if (pa->nChainWork < pb->nChainWork) return true;
 
         // ... then by earliest time received, ...
         if (pa->nSequenceId < pb->nSequenceId) return false;
         if (pa->nSequenceId > pb->nSequenceId) return true;
 
         // Use pointer address as tie breaker (should only happen with blocks
         // loaded from disk, as those all have id 0).
         if (pa < pb) return false;
         if (pa > pb) return true;
 
         // Identical blocks.
         return false;
     }
 };
 
 CBlockIndex *pindexBestInvalid;
 
 /**
  * The set of all CBlockIndex entries with BLOCK_VALID_TRANSACTIONS (for itself
  * and all ancestors) and as good as our current tip or better. Entries may be
  * failed, though, and pruning nodes may be missing the data for the block.
  */
 std::set<CBlockIndex *, CBlockIndexWorkComparator> setBlockIndexCandidates;
 /**
  * All pairs A->B, where A (or one of its ancestors) misses transactions, but B
  * has transactions. Pruned nodes may have entries where B is missing data.
  */
 std::multimap<CBlockIndex *, CBlockIndex *> mapBlocksUnlinked;
 
 CCriticalSection cs_LastBlockFile;
 std::vector<CBlockFileInfo> vinfoBlockFile;
 int nLastBlockFile = 0;
 /**
  * Global flag to indicate we should check to see if there are block/undo files
  * that should be deleted. Set on startup or if we allocate more file space when
  * we're in prune mode.
  */
 bool fCheckForPruning = false;
 
 /**
  * Every received block is assigned a unique and increasing identifier, so we
  * know which one to give priority in case of a fork.
  */
 CCriticalSection cs_nBlockSequenceId;
 /** Blocks loaded from disk are assigned id 0, so start the counter at 1. */
 int32_t nBlockSequenceId = 1;
 /** Decreasing counter (used by subsequent preciousblock calls). */
 int32_t nBlockReverseSequenceId = -1;
 /** chainwork for the last block that preciousblock has been applied to. */
 arith_uint256 nLastPreciousChainwork = 0;
 
 /** Dirty block index entries. */
 std::set<CBlockIndex *> setDirtyBlockIndex;
 
 /** Dirty block file entries. */
 std::set<int> setDirtyFileInfo;
 } // namespace
 
-/**
- * Use this class to start tracking transactions that are removed from the
- * mempool and pass all those transactions through SyncTransaction when the
- * object goes out of scope. This is currently only used to call SyncTransaction
- * on conflicts removed from the mempool during block connection. Applied in
- * ActivateBestChain around ActivateBestStep which in turn calls:
- * ConnectTip->removeForBlock->removeConflicts
- */
-class MemPoolConflictRemovalTracker {
-private:
-    std::vector<CTransactionRef> conflictedTxs;
-    CTxMemPool &pool;
-
-public:
-    MemPoolConflictRemovalTracker(CTxMemPool &_pool) : pool(_pool) {
-        pool.NotifyEntryRemoved.connect(boost::bind(
-            &MemPoolConflictRemovalTracker::NotifyEntryRemoved, this, _1, _2));
-    }
-
-    void NotifyEntryRemoved(CTransactionRef txRemoved,
-                            MemPoolRemovalReason reason) {
-        if (reason == MemPoolRemovalReason::CONFLICT) {
-            conflictedTxs.push_back(txRemoved);
-        }
-    }
-
-    ~MemPoolConflictRemovalTracker() {
-        pool.NotifyEntryRemoved.disconnect(boost::bind(
-            &MemPoolConflictRemovalTracker::NotifyEntryRemoved, this, _1, _2));
-        for (const auto &tx : conflictedTxs) {
-            GetMainSignals().SyncTransaction(
-                *tx, nullptr, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
-        }
-        conflictedTxs.clear();
-    }
-};
-
 CBlockIndex *FindForkInGlobalIndex(const CChain &chain,
                                    const CBlockLocator &locator) {
     // Find the first block the caller has in the main chain
     for (const uint256 &hash : locator.vHave) {
         BlockMap::iterator mi = mapBlockIndex.find(hash);
         if (mi != mapBlockIndex.end()) {
             CBlockIndex *pindex = (*mi).second;
             if (chain.Contains(pindex)) return pindex;
             if (pindex->GetAncestor(chain.Height()) == chain.Tip()) {
                 return chain.Tip();
             }
         }
     }
     return chain.Genesis();
 }
 
 CCoinsViewCache *pcoinsTip = nullptr;
 CBlockTreeDB *pblocktree = nullptr;
 
 enum FlushStateMode {
     FLUSH_STATE_NONE,
     FLUSH_STATE_IF_NEEDED,
     FLUSH_STATE_PERIODIC,
     FLUSH_STATE_ALWAYS
 };
 
 // See definition for documentation
 static bool FlushStateToDisk(CValidationState &state, FlushStateMode mode,
                              int nManualPruneHeight = 0);
 static void FindFilesToPruneManual(std::set<int> &setFilesToPrune,
                                    int nManualPruneHeight);
 static uint32_t GetBlockScriptFlags(const CBlockIndex *pindex,
                                     const Config &config);
 
 static bool IsFinalTx(const CTransaction &tx, int nBlockHeight,
                       int64_t nBlockTime) {
     if (tx.nLockTime == 0) {
         return true;
     }
 
     int64_t lockTime = tx.nLockTime;
     int64_t lockTimeLimit =
         (lockTime < LOCKTIME_THRESHOLD) ? nBlockHeight : nBlockTime;
     if (lockTime < lockTimeLimit) {
         return true;
     }
 
     for (const auto &txin : tx.vin) {
         if (txin.nSequence != CTxIn::SEQUENCE_FINAL) {
             return false;
         }
     }
     return true;
 }
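 
 // Illustrative example for IsFinalTx() above (hypothetical values): an
 // nLockTime below LOCKTIME_THRESHOLD (500000000) is read as a block height,
 // so nLockTime = 499999 is final once nBlockHeight reaches 500000; a value at
 // or above the threshold is read as a UNIX timestamp and compared against
 // nBlockTime instead. If the lock time has not been reached, the transaction
 // is still final when every input sets nSequence == CTxIn::SEQUENCE_FINAL
 // (0xffffffff), which opts out of nLockTime enforcement.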
 
 /**
  * Calculates the block height and previous block's median time past at
  * which the transaction will be considered final in the context of BIP 68.
  * Also removes from the vector of input heights any entries which did not
  * correspond to sequence locked inputs as they do not affect the calculation.
  */
 static std::pair<int, int64_t>
 CalculateSequenceLocks(const CTransaction &tx, int flags,
                        std::vector<int> *prevHeights,
                        const CBlockIndex &block) {
     assert(prevHeights->size() == tx.vin.size());
 
     // Will be set to the equivalent height- and time-based nLockTime
     // values that would be necessary to satisfy all relative lock-
     // time constraints given our view of block chain history.
     // The semantics of nLockTime are the last invalid height/time, so
     // use -1 to have the effect of any height or time being valid.
     int nMinHeight = -1;
     int64_t nMinTime = -1;
 
     // tx.nVersion is a signed integer, so it requires a cast to unsigned;
     // otherwise we would be doing a signed comparison and half the range of
     // nVersion wouldn't support BIP 68.
     bool fEnforceBIP68 = static_cast<uint32_t>(tx.nVersion) >= 2 &&
                          flags & LOCKTIME_VERIFY_SEQUENCE;
 
     // Do not enforce sequence numbers as a relative lock time
     // unless we have been instructed to
     if (!fEnforceBIP68) {
         return std::make_pair(nMinHeight, nMinTime);
     }
 
     for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
         const CTxIn &txin = tx.vin[txinIndex];
 
         // Sequence numbers with the most significant bit set are not
         // treated as relative lock-times, nor are they given any
         // consensus-enforced meaning at this point.
         if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_DISABLE_FLAG) {
             // The height of this input is not relevant for sequence locks
             (*prevHeights)[txinIndex] = 0;
             continue;
         }
 
         int nCoinHeight = (*prevHeights)[txinIndex];
 
         if (txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG) {
             int64_t nCoinTime = block.GetAncestor(std::max(nCoinHeight - 1, 0))
                                     ->GetMedianTimePast();
             // NOTE: Subtract 1 to maintain nLockTime semantics.
             // BIP 68 relative lock times have the semantics of calculating the
             // first block or time at which the transaction would be valid. When
             // calculating the effective block time or height for the entire
             // transaction, we switch to using the semantics of nLockTime which
             // is the last invalid block time or height. Thus we subtract 1 from
             // the calculated time or height.
 
             // Time-based relative lock-times are measured from the smallest
             // allowed timestamp of the block containing the txout being spent,
             // which is the median time past of the block prior.
             nMinTime = std::max(
                 nMinTime,
                 nCoinTime +
                     (int64_t)((txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK)
                               << CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) -
                     1);
         } else {
             nMinHeight = std::max(
                 nMinHeight,
                 nCoinHeight +
                     (int)(txin.nSequence & CTxIn::SEQUENCE_LOCKTIME_MASK) - 1);
         }
     }
 
     return std::make_pair(nMinHeight, nMinTime);
 }
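 
 // Illustrative examples for CalculateSequenceLocks() above (hypothetical
 // nSequence values): a sequence of 144 with no flags set is a height-based
 // relative lock, so nMinHeight becomes nCoinHeight + 144 - 1; a sequence of
 // (SEQUENCE_LOCKTIME_TYPE_FLAG | 10) is a time-based lock of
 // 10 << SEQUENCE_LOCKTIME_GRANULARITY = 5120 seconds, so nMinTime becomes
 // nCoinTime + 5120 - 1; and any input with SEQUENCE_LOCKTIME_DISABLE_FLAG set
 // places no constraint at all.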
 
 static bool EvaluateSequenceLocks(const CBlockIndex &block,
                                   std::pair<int, int64_t> lockPair) {
     assert(block.pprev);
     int64_t nBlockTime = block.pprev->GetMedianTimePast();
     if (lockPair.first >= block.nHeight || lockPair.second >= nBlockTime)
         return false;
 
     return true;
 }
 
 bool SequenceLocks(const CTransaction &tx, int flags,
                    std::vector<int> *prevHeights, const CBlockIndex &block) {
     return EvaluateSequenceLocks(
         block, CalculateSequenceLocks(tx, flags, prevHeights, block));
 }
 
 bool TestLockPointValidity(const LockPoints *lp) {
     AssertLockHeld(cs_main);
     assert(lp);
     // If there are relative lock times, then the maxInputBlock will be set.
     // If there are no relative lock times, the LockPoints don't depend on the
     // chain.
     if (lp->maxInputBlock) {
         // Check whether chainActive is an extension of the block at which the
         // LockPoints calculation was valid. If not, LockPoints are no longer
         // valid.
         if (!chainActive.Contains(lp->maxInputBlock)) {
             return false;
         }
     }
 
     // LockPoints still valid
     return true;
 }
 
 bool CheckSequenceLocks(const CTransaction &tx, int flags, LockPoints *lp,
                         bool useExistingLockPoints) {
     AssertLockHeld(cs_main);
     AssertLockHeld(mempool.cs);
 
     CBlockIndex *tip = chainActive.Tip();
     CBlockIndex index;
     index.pprev = tip;
     // CheckSequenceLocks() uses chainActive.Height()+1 to evaluate height based
     // locks because when SequenceLocks() is called within ConnectBlock(), the
     // height of the block *being* evaluated is what is used. Thus if we want to
     // know if a transaction can be part of the *next* block, we need to use one
     // more than chainActive.Height()
     index.nHeight = tip->nHeight + 1;
 
     std::pair<int, int64_t> lockPair;
     if (useExistingLockPoints) {
         assert(lp);
         lockPair.first = lp->height;
         lockPair.second = lp->time;
     } else {
         // pcoinsTip contains the UTXO set for chainActive.Tip()
         CCoinsViewMemPool viewMemPool(pcoinsTip, mempool);
         std::vector<int> prevheights;
         prevheights.resize(tx.vin.size());
         for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) {
             const CTxIn &txin = tx.vin[txinIndex];
             Coin coin;
             if (!viewMemPool.GetCoin(txin.prevout, coin)) {
                 return error("%s: Missing input", __func__);
             }
             if (coin.GetHeight() == MEMPOOL_HEIGHT) {
                 // Assume all mempool transactions confirm in the next block.
                 prevheights[txinIndex] = tip->nHeight + 1;
             } else {
                 prevheights[txinIndex] = coin.GetHeight();
             }
         }
         lockPair = CalculateSequenceLocks(tx, flags, &prevheights, index);
         if (lp) {
             lp->height = lockPair.first;
             lp->time = lockPair.second;
             // Also store the hash of the block with the highest height of all
             // the blocks which have sequence locked prevouts. This hash needs
             // to still be on the chain for these LockPoint calculations to be
             // valid.
             // Note: It is impossible to correctly calculate a maxInputBlock if
             // any of the sequence locked inputs depend on unconfirmed txs,
             // except in the special case where the relative lock time/height is
             // 0, which is equivalent to no sequence lock. Since we assume an
             // input height of tip+1 for mempool txs and test the resulting
             // lockPair from CalculateSequenceLocks against tip+1, we know
             // EvaluateSequenceLocks will fail if there was a non-zero sequence
             // lock on a mempool input, so we can use the return value of
             // CheckSequenceLocks to indicate the LockPoints' validity.
             int maxInputHeight = 0;
             for (int height : prevheights) {
                 // Can ignore mempool inputs since we'll fail if they had
                 // non-zero locks
                 if (height != tip->nHeight + 1) {
                     maxInputHeight = std::max(maxInputHeight, height);
                 }
             }
             lp->maxInputBlock = tip->GetAncestor(maxInputHeight);
         }
     }
     return EvaluateSequenceLocks(index, lockPair);
 }
 
 uint64_t GetSigOpCountWithoutP2SH(const CTransaction &tx) {
     uint64_t nSigOps = 0;
     for (const auto &txin : tx.vin) {
         nSigOps += txin.scriptSig.GetSigOpCount(false);
     }
     for (const auto &txout : tx.vout) {
         nSigOps += txout.scriptPubKey.GetSigOpCount(false);
     }
     return nSigOps;
 }
 
 uint64_t GetP2SHSigOpCount(const CTransaction &tx,
                            const CCoinsViewCache &inputs) {
     if (tx.IsCoinBase()) {
         return 0;
     }
 
     uint64_t nSigOps = 0;
     for (auto &i : tx.vin) {
         const CTxOut &prevout = inputs.GetOutputFor(i);
         if (prevout.scriptPubKey.IsPayToScriptHash()) {
             nSigOps += prevout.scriptPubKey.GetSigOpCount(i.scriptSig);
         }
     }
     return nSigOps;
 }
 
 uint64_t GetTransactionSigOpCount(const CTransaction &tx,
                                   const CCoinsViewCache &inputs, int flags) {
     uint64_t nSigOps = GetSigOpCountWithoutP2SH(tx);
     if (tx.IsCoinBase()) {
         return nSigOps;
     }
 
     if (flags & SCRIPT_VERIFY_P2SH) {
         nSigOps += GetP2SHSigOpCount(tx, inputs);
     }
 
     return nSigOps;
 }
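 
 // Illustrative sigop accounting (hypothetical output): a bare 2-of-3
 // OP_CHECKMULTISIG output is counted as 20 sigops by the legacy (inaccurate)
 // counting in GetSigOpCountWithoutP2SH, whereas the same multisig wrapped in
 // P2SH is counted accurately from the redeem script by GetP2SHSigOpCount and
 // contributes 3 sigops when SCRIPT_VERIFY_P2SH is set.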
 
 static bool CheckTransactionCommon(const CTransaction &tx,
                                    CValidationState &state,
                                    bool fCheckDuplicateInputs) {
     // Basic checks that don't depend on any context
     if (tx.vin.empty()) {
         return state.DoS(10, false, REJECT_INVALID, "bad-txns-vin-empty");
     }
 
     if (tx.vout.empty()) {
         return state.DoS(10, false, REJECT_INVALID, "bad-txns-vout-empty");
     }
 
     // Size limit
     if (::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION) > MAX_TX_SIZE) {
         return state.DoS(100, false, REJECT_INVALID, "bad-txns-oversize");
     }
 
     // Check for negative or overflow output values
     Amount nValueOut(0);
     for (const auto &txout : tx.vout) {
         if (txout.nValue < Amount(0)) {
             return state.DoS(100, false, REJECT_INVALID,
                              "bad-txns-vout-negative");
         }
 
         if (txout.nValue > MAX_MONEY) {
             return state.DoS(100, false, REJECT_INVALID,
                              "bad-txns-vout-toolarge");
         }
 
         nValueOut += txout.nValue;
         if (!MoneyRange(nValueOut)) {
             return state.DoS(100, false, REJECT_INVALID,
                              "bad-txns-txouttotal-toolarge");
         }
     }
 
     if (GetSigOpCountWithoutP2SH(tx) > MAX_TX_SIGOPS_COUNT) {
         return state.DoS(100, false, REJECT_INVALID, "bad-txn-sigops");
     }
 
     // Check for duplicate inputs - note that this check is slow so we skip it
     // in CheckBlock
     if (fCheckDuplicateInputs) {
         std::set<COutPoint> vInOutPoints;
         for (const auto &txin : tx.vin) {
             if (!vInOutPoints.insert(txin.prevout).second) {
                 return state.DoS(100, false, REJECT_INVALID,
                                  "bad-txns-inputs-duplicate");
             }
         }
     }
 
     return true;
 }
 
 bool CheckCoinbase(const CTransaction &tx, CValidationState &state,
                    bool fCheckDuplicateInputs) {
     if (!tx.IsCoinBase()) {
         return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false,
                          "first tx is not coinbase");
     }
 
     if (!CheckTransactionCommon(tx, state, fCheckDuplicateInputs)) {
         // CheckTransactionCommon fills in the state.
         return false;
     }
 
     if (tx.vin[0].scriptSig.size() < 2 || tx.vin[0].scriptSig.size() > 100) {
         return state.DoS(100, false, REJECT_INVALID, "bad-cb-length");
     }
 
     return true;
 }
 
 bool CheckRegularTransaction(const CTransaction &tx, CValidationState &state,
                              bool fCheckDuplicateInputs) {
     if (tx.IsCoinBase()) {
         return state.DoS(100, false, REJECT_INVALID, "bad-tx-coinbase");
     }
 
     if (!CheckTransactionCommon(tx, state, fCheckDuplicateInputs)) {
         // CheckTransactionCommon fills in the state.
         return false;
     }
 
     for (const auto &txin : tx.vin) {
         if (txin.prevout.IsNull()) {
             return state.DoS(10, false, REJECT_INVALID,
                              "bad-txns-prevout-null");
         }
     }
 
     return true;
 }
 
 void LimitMempoolSize(CTxMemPool &pool, size_t limit, unsigned long age) {
     int expired = pool.Expire(GetTime() - age);
     if (expired != 0) {
         LogPrint(BCLog::MEMPOOL,
                  "Expired %i transactions from the memory pool\n", expired);
     }
 
     std::vector<COutPoint> vNoSpendsRemaining;
     pool.TrimToSize(limit, &vNoSpendsRemaining);
     for (const COutPoint &removed : vNoSpendsRemaining) {
         pcoinsTip->Uncache(removed);
     }
 }
 
 /** Convert CValidationState to a human-readable message for logging */
 std::string FormatStateMessage(const CValidationState &state) {
     return strprintf(
         "%s%s (code %i)", state.GetRejectReason(),
         state.GetDebugMessage().empty() ? "" : ", " + state.GetDebugMessage(),
         state.GetRejectCode());
 }
 
 static bool IsCurrentForFeeEstimation() {
     AssertLockHeld(cs_main);
     if (IsInitialBlockDownload()) {
         return false;
     }
     if (chainActive.Tip()->GetBlockTime() <
         (GetTime() - MAX_FEE_ESTIMATION_TIP_AGE)) {
         return false;
     }
     if (chainActive.Height() < pindexBestHeader->nHeight - 1) {
         return false;
     }
     return true;
 }
 
 static bool IsUAHFenabled(const Config &config, int nHeight) {
     return nHeight >= config.GetChainParams().GetConsensus().uahfHeight;
 }
 
 bool IsUAHFenabled(const Config &config, const CBlockIndex *pindexPrev) {
     if (pindexPrev == nullptr) {
         return false;
     }
 
     return IsUAHFenabled(config, pindexPrev->nHeight);
 }
 
 static bool IsDAAEnabled(const Config &config, int nHeight) {
     return nHeight >= config.GetChainParams().GetConsensus().daaHeight;
 }
 
 bool IsDAAEnabled(const Config &config, const CBlockIndex *pindexPrev) {
     if (pindexPrev == nullptr) {
         return false;
     }
 
     return IsDAAEnabled(config, pindexPrev->nHeight);
 }
 
 // Used to avoid mempool polluting consensus critical paths if CCoinsViewMemPool
 // were somehow broken and returning the wrong scriptPubKeys.
 static bool CheckInputsFromMempoolAndCache(const CTransaction &tx,
                                            CValidationState &state,
                                            const CCoinsViewCache &view,
                                            CTxMemPool &pool, uint32_t flags,
                                            bool cacheSigStore,
                                            PrecomputedTransactionData &txdata) {
     AssertLockHeld(cs_main);
 
     // pool.cs should be locked already, but go ahead and re-take the lock here
     // to enforce that mempool doesn't change between when we check the view and
     // when we actually call through to CheckInputs
     LOCK(pool.cs);
 
     assert(!tx.IsCoinBase());
     for (const CTxIn &txin : tx.vin) {
         const Coin &coin = view.AccessCoin(txin.prevout);
 
         // At this point we haven't actually checked if the coins are all
         // available (or shouldn't assume we have, since CheckInputs does). So
         // we just return failure if the inputs are not available here, and then
         // only have to check equivalence for available inputs.
         if (coin.IsSpent()) {
             return false;
         }
 
         const CTransactionRef &txFrom = pool.get(txin.prevout.hash);
         if (txFrom) {
             assert(txFrom->GetHash() == txin.prevout.hash);
             assert(txFrom->vout.size() > txin.prevout.n);
             assert(txFrom->vout[txin.prevout.n] == coin.GetTxOut());
         } else {
             const Coin &coinFromDisk = pcoinsTip->AccessCoin(txin.prevout);
             assert(!coinFromDisk.IsSpent());
             assert(coinFromDisk.GetTxOut() == coin.GetTxOut());
         }
     }
 
     return CheckInputs(tx, state, view, true, flags, cacheSigStore, true,
                        txdata);
 }
 
 static bool AcceptToMemoryPoolWorker(
     const Config &config, CTxMemPool &pool, CValidationState &state,
     const CTransactionRef &ptx, bool fLimitFree, bool *pfMissingInputs,
     int64_t nAcceptTime, std::list<CTransactionRef> *plTxnReplaced,
     bool fOverrideMempoolLimit, const Amount nAbsurdFee,
     std::vector<COutPoint> &coins_to_uncache) {
     AssertLockHeld(cs_main);
 
     const CTransaction &tx = *ptx;
     const uint256 txid = tx.GetId();
     if (pfMissingInputs) {
         *pfMissingInputs = false;
     }
 
     // Coinbase is only valid in a block, not as a loose transaction.
     if (!CheckRegularTransaction(tx, state, true)) {
         // state filled in by CheckRegularTransaction.
         return false;
     }
 
     // We'd rather not work on nonstandard transactions (unless -testnet/-regtest)
     std::string reason;
     if (fRequireStandard && !IsStandardTx(tx, reason)) {
         return state.DoS(0, false, REJECT_NONSTANDARD, reason);
     }
 
     // Only accept nLockTime-using transactions that can be mined in the next
     // block; we don't want our mempool filled up with transactions that can't
     // be mined yet.
     CValidationState ctxState;
     if (!ContextualCheckTransactionForCurrentBlock(
             config, tx, ctxState, STANDARD_LOCKTIME_VERIFY_FLAGS)) {
         // We copy the state from a dummy to ensure we don't increase the ban
         // score of the peer for a transaction that could be valid in the
         // future.
         return state.DoS(
             0, false, REJECT_NONSTANDARD, ctxState.GetRejectReason(),
             ctxState.CorruptionPossible(), ctxState.GetDebugMessage());
     }
 
     // Is it already in the memory pool?
     if (pool.exists(txid)) {
         return state.Invalid(false, REJECT_ALREADY_KNOWN,
                              "txn-already-in-mempool");
     }
 
     // Check for conflicts with in-memory transactions
     {
         // Protect pool.mapNextTx
         LOCK(pool.cs);
         for (const CTxIn &txin : tx.vin) {
             auto itConflicting = pool.mapNextTx.find(txin.prevout);
             if (itConflicting != pool.mapNextTx.end()) {
                 // Disable replacement feature for good
                 return state.Invalid(false, REJECT_CONFLICT,
                                      "txn-mempool-conflict");
             }
         }
     }
 
     {
         CCoinsView dummy;
         CCoinsViewCache view(&dummy);
 
         Amount nValueIn(0);
         LockPoints lp;
         {
             LOCK(pool.cs);
             CCoinsViewMemPool viewMemPool(pcoinsTip, pool);
             view.SetBackend(viewMemPool);
 
             // Do we already have it?
             for (size_t out = 0; out < tx.vout.size(); out++) {
                 COutPoint outpoint(txid, out);
                 bool had_coin_in_cache = pcoinsTip->HaveCoinInCache(outpoint);
                 if (view.HaveCoin(outpoint)) {
                     if (!had_coin_in_cache) {
                         coins_to_uncache.push_back(outpoint);
                     }
 
                     return state.Invalid(false, REJECT_ALREADY_KNOWN,
                                          "txn-already-known");
                 }
             }
 
             // Do all inputs exist?
             for (const CTxIn &txin : tx.vin) {
                 if (!pcoinsTip->HaveCoinInCache(txin.prevout)) {
                     coins_to_uncache.push_back(txin.prevout);
                 }
 
                 if (!view.HaveCoin(txin.prevout)) {
                     if (pfMissingInputs) {
                         *pfMissingInputs = true;
                     }
 
                     // fMissingInputs and !state.IsInvalid() are used to detect
                     // this condition; don't set state.Invalid().
                     return false;
                 }
             }
 
             // Are the actual inputs available?
             if (!view.HaveInputs(tx)) {
                 return state.Invalid(false, REJECT_DUPLICATE,
                                      "bad-txns-inputs-spent");
             }
 
             // Bring the best block into scope.
             view.GetBestBlock();
 
             nValueIn = view.GetValueIn(tx);
 
             // We have all inputs cached now, so switch back to dummy so that
             // we don't need to keep a lock on the mempool.
             view.SetBackend(dummy);
 
             // Only accept BIP68 sequence locked transactions that can be mined
             // in the next block; we don't want our mempool filled up with
             // transactions that can't be mined yet. Must keep pool.cs for this
             // unless we change CheckSequenceLocks to take a CoinsViewCache
             // instead of create its own.
             if (!CheckSequenceLocks(tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) {
                 return state.DoS(0, false, REJECT_NONSTANDARD,
                                  "non-BIP68-final");
             }
         }
 
         // Check for non-standard pay-to-script-hash in inputs
         if (fRequireStandard && !AreInputsStandard(tx, view)) {
             return state.Invalid(false, REJECT_NONSTANDARD,
                                  "bad-txns-nonstandard-inputs");
         }
 
         int64_t nSigOpsCount =
             GetTransactionSigOpCount(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
 
         Amount nValueOut = tx.GetValueOut();
         Amount nFees = nValueIn - nValueOut;
         // nModifiedFees includes any fee deltas from PrioritiseTransaction
         Amount nModifiedFees = nFees;
         double nPriorityDummy = 0;
         pool.ApplyDeltas(txid, nPriorityDummy, nModifiedFees);
 
         Amount inChainInputValue;
         double dPriority =
             view.GetPriority(tx, chainActive.Height(), inChainInputValue);
 
         // Keep track of transactions that spend a coinbase, which we re-scan
         // during reorgs to ensure COINBASE_MATURITY is still met.
         bool fSpendsCoinbase = false;
         for (const CTxIn &txin : tx.vin) {
             const Coin &coin = view.AccessCoin(txin.prevout);
             if (coin.IsCoinBase()) {
                 fSpendsCoinbase = true;
                 break;
             }
         }
 
         CTxMemPoolEntry entry(ptx, nFees, nAcceptTime, dPriority,
                               chainActive.Height(), inChainInputValue,
                               fSpendsCoinbase, nSigOpsCount, lp);
         unsigned int nSize = entry.GetTxSize();
 
         // Check that the transaction doesn't have an excessive number of
         // sigops, making it impossible to mine. Since the coinbase transaction
         // itself can contain sigops, MAX_STANDARD_TX_SIGOPS is less than
         // MAX_BLOCK_SIGOPS_PER_MB; we still consider this an invalid rather
         // than a merely non-standard transaction.
         if (nSigOpsCount > MAX_STANDARD_TX_SIGOPS) {
             return state.DoS(0, false, REJECT_NONSTANDARD,
                              "bad-txns-too-many-sigops", false,
                              strprintf("%d", nSigOpsCount));
         }
 
         Amount mempoolRejectFee =
             pool.GetMinFee(GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
                            1000000)
                 .GetFee(nSize);
         if (mempoolRejectFee > Amount(0) && nModifiedFees < mempoolRejectFee) {
             return state.DoS(0, false, REJECT_INSUFFICIENTFEE,
                              "mempool min fee not met", false,
                              strprintf("%d < %d", nFees, mempoolRejectFee));
         }
 
         if (GetBoolArg("-relaypriority", DEFAULT_RELAYPRIORITY) &&
             nModifiedFees < ::minRelayTxFee.GetFee(nSize) &&
             !AllowFree(entry.GetPriority(chainActive.Height() + 1))) {
             // Require that free transactions have sufficient priority to be
             // mined in the next block.
             return state.DoS(0, false, REJECT_INSUFFICIENTFEE,
                              "insufficient priority");
         }
 
         // Continuously rate-limit free (really, very-low-fee) transactions.
         // This mitigates 'penny-flooding' -- sending thousands of free
         // transactions just to be annoying or make others' transactions take
         // longer to confirm.
         if (fLimitFree && nModifiedFees < ::minRelayTxFee.GetFee(nSize)) {
             static CCriticalSection csFreeLimiter;
             static double dFreeCount;
             static int64_t nLastTime;
             int64_t nNow = GetTime();
 
             LOCK(csFreeLimiter);
 
             // Use an exponentially decaying ~10-minute window:
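             // (Illustrative arithmetic: after 600 idle seconds the
             // pow(1 - 1/600, dt) factor below shrinks dFreeCount to about
             // (599/600)^600 ~= 1/e ~= 0.37 of its previous value.)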
             dFreeCount *= pow(1.0 - 1.0 / 600.0, double(nNow - nLastTime));
             nLastTime = nNow;
             // -limitfreerelay unit is thousand-bytes-per-minute
             // At default rate it would take over a month to fill 1GB
             if (dFreeCount + nSize >=
                 GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) * 10 * 1000) {
                 return state.DoS(0, false, REJECT_INSUFFICIENTFEE,
                                  "rate limited free transaction");
             }
 
             LogPrint(BCLog::MEMPOOL, "Rate limit dFreeCount: %g => %g\n",
                      dFreeCount, dFreeCount + nSize);
             dFreeCount += nSize;
         }
 
         if (nAbsurdFee != Amount(0) && nFees > nAbsurdFee) {
             return state.Invalid(false, REJECT_HIGHFEE, "absurdly-high-fee",
                                  strprintf("%d > %d", nFees, nAbsurdFee));
         }
 
         // Calculate in-mempool ancestors, up to a limit.
         CTxMemPool::setEntries setAncestors;
         size_t nLimitAncestors =
             GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
         size_t nLimitAncestorSize =
             GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000;
         size_t nLimitDescendants =
             GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
         size_t nLimitDescendantSize =
             GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) *
             1000;
         std::string errString;
         if (!pool.CalculateMemPoolAncestors(
                 entry, setAncestors, nLimitAncestors, nLimitAncestorSize,
                 nLimitDescendants, nLimitDescendantSize, errString)) {
             return state.DoS(0, false, REJECT_NONSTANDARD,
                              "too-long-mempool-chain", false, errString);
         }
 
         uint32_t scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS;
         if (!Params().RequireStandard()) {
             scriptVerifyFlags =
                 GetArg("-promiscuousmempoolflags", scriptVerifyFlags);
         }
 
         // Check against previous transactions. This is done last to help
         // prevent CPU exhaustion denial-of-service attacks.
         PrecomputedTransactionData txdata(tx);
         if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, false,
                          txdata)) {
             // State filled in by CheckInputs.
             return false;
         }
 
         // Check again against the current block tip's script verification flags
         // to cache our script execution flags. This is, of course, useless if
         // the next block has different script flags from the previous one, but
         // because the cache tracks script flags for us it will auto-invalidate
         // and we'll just have a few blocks of extra misses on soft-fork
         // activation.
         //
         // This is also useful in case of bugs in the standard flags that cause
         // transactions to pass as valid when they're actually invalid. For
         // instance the STRICTENC flag was incorrectly allowing certain CHECKSIG
         // NOT scripts to pass, even though they were invalid.
         //
         // There is a similar check in CreateNewBlock() to prevent creating
         // invalid blocks (using TestBlockValidity), however allowing such
         // transactions into the mempool can be exploited as a DoS attack.
         uint32_t currentBlockScriptVerifyFlags =
             GetBlockScriptFlags(chainActive.Tip(), config);
         if (!CheckInputsFromMempoolAndCache(tx, state, view, pool,
                                             currentBlockScriptVerifyFlags, true,
                                             txdata)) {
             // If we're using promiscuousmempoolflags, we may hit this normally.
             // Check if current block has some flags that scriptVerifyFlags does
             // not before printing an ominous warning.
             if (!(~scriptVerifyFlags & currentBlockScriptVerifyFlags)) {
                 return error(
                     "%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against "
                     "MANDATORY but not STANDARD flags %s, %s",
                     __func__, txid.ToString(), FormatStateMessage(state));
             }
 
             if (!CheckInputs(tx, state, view, true,
                              MANDATORY_SCRIPT_VERIFY_FLAGS, true, false,
                              txdata)) {
                 return error(
                     "%s: ConnectInputs failed against MANDATORY but not "
                     "STANDARD flags due to promiscuous mempool %s, %s",
                     __func__, txid.ToString(), FormatStateMessage(state));
             }
 
             LogPrintf("Warning: -promiscuousmempool flags set to not include "
                       "currently enforced soft forks, this may break mining or "
                       "otherwise cause instability!\n");
         }
 
         // This transaction should only count for fee estimation if
         // the node is not behind and it is not dependent on any other
         // transactions in the mempool.
         bool validForFeeEstimation =
             IsCurrentForFeeEstimation() && pool.HasNoInputsOf(tx);
 
         // Store transaction in memory.
         pool.addUnchecked(txid, entry, setAncestors, validForFeeEstimation);
 
         // Trim mempool and check if tx was trimmed.
         if (!fOverrideMempoolLimit) {
             LimitMempoolSize(
                 pool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
                 GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
             if (!pool.exists(txid)) {
                 return state.DoS(0, false, REJECT_INSUFFICIENTFEE,
                                  "mempool full");
             }
         }
     }
 
-    GetMainSignals().SyncTransaction(
-        tx, nullptr, CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
-
+    GetMainSignals().TransactionAddedToMempool(ptx);
     return true;
 }
 
 static bool AcceptToMemoryPoolWithTime(
     const Config &config, CTxMemPool &pool, CValidationState &state,
     const CTransactionRef &tx, bool fLimitFree, bool *pfMissingInputs,
     int64_t nAcceptTime, std::list<CTransactionRef> *plTxnReplaced = nullptr,
     bool fOverrideMempoolLimit = false, const Amount nAbsurdFee = Amount(0)) {
     std::vector<COutPoint> coins_to_uncache;
     bool res = AcceptToMemoryPoolWorker(
         config, pool, state, tx, fLimitFree, pfMissingInputs, nAcceptTime,
         plTxnReplaced, fOverrideMempoolLimit, nAbsurdFee, coins_to_uncache);
     if (!res) {
         for (const COutPoint &outpoint : coins_to_uncache) {
             pcoinsTip->Uncache(outpoint);
         }
     }
 
     // After we've (potentially) uncached entries, ensure our coins cache is
     // still within its size limits
     CValidationState stateDummy;
     FlushStateToDisk(stateDummy, FLUSH_STATE_PERIODIC);
     return res;
 }
 
 bool AcceptToMemoryPool(const Config &config, CTxMemPool &pool,
                         CValidationState &state, const CTransactionRef &tx,
                         bool fLimitFree, bool *pfMissingInputs,
                         std::list<CTransactionRef> *plTxnReplaced,
                         bool fOverrideMempoolLimit, const Amount nAbsurdFee) {
     return AcceptToMemoryPoolWithTime(config, pool, state, tx, fLimitFree,
                                       pfMissingInputs, GetTime(), plTxnReplaced,
                                       fOverrideMempoolLimit, nAbsurdFee);
 }
 
 /** Return transaction in txOut, and if it was found inside a block, its hash is
  * placed in hashBlock */
 bool GetTransaction(const Config &config, const uint256 &txid,
                     CTransactionRef &txOut, uint256 &hashBlock,
                     bool fAllowSlow) {
     CBlockIndex *pindexSlow = nullptr;
 
     LOCK(cs_main);
 
     CTransactionRef ptx = mempool.get(txid);
     if (ptx) {
         txOut = ptx;
         return true;
     }
 
     if (fTxIndex) {
         CDiskTxPos postx;
         if (pblocktree->ReadTxIndex(txid, postx)) {
             CAutoFile file(OpenBlockFile(postx, true), SER_DISK,
                            CLIENT_VERSION);
             if (file.IsNull())
                 return error("%s: OpenBlockFile failed", __func__);
             CBlockHeader header;
             try {
                 file >> header;
                 fseek(file.Get(), postx.nTxOffset, SEEK_CUR);
                 file >> txOut;
             } catch (const std::exception &e) {
                 return error("%s: Deserialize or I/O error - %s", __func__,
                              e.what());
             }
             hashBlock = header.GetHash();
             if (txOut->GetId() != txid)
                 return error("%s: txid mismatch", __func__);
             return true;
         }
     }
 
     // use coin database to locate block that contains transaction, and scan it
     if (fAllowSlow) {
         const Coin &coin = AccessByTxid(*pcoinsTip, txid);
         if (!coin.IsSpent()) {
             pindexSlow = chainActive[coin.GetHeight()];
         }
     }
 
     if (pindexSlow) {
         CBlock block;
         if (ReadBlockFromDisk(block, pindexSlow, config)) {
             for (const auto &tx : block.vtx) {
                 if (tx->GetId() == txid) {
                     txOut = tx;
                     hashBlock = pindexSlow->GetBlockHash();
                     return true;
                 }
             }
         }
     }
 
     return false;
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // CBlock and CBlockIndex
 //
 
 bool WriteBlockToDisk(const CBlock &block, CDiskBlockPos &pos,
                       const CMessageHeader::MessageMagic &messageStart) {
     // Open history file to append
     CAutoFile fileout(OpenBlockFile(pos), SER_DISK, CLIENT_VERSION);
     if (fileout.IsNull()) {
         return error("WriteBlockToDisk: OpenBlockFile failed");
     }
 
     // Write index header
     unsigned int nSize = GetSerializeSize(fileout, block);
     fileout << FLATDATA(messageStart) << nSize;
 
     // Write block
     long fileOutPos = ftell(fileout.Get());
     if (fileOutPos < 0) {
         return error("WriteBlockToDisk: ftell failed");
     }
 
     pos.nPos = (unsigned int)fileOutPos;
     fileout << block;
 
     return true;
 }
 
 bool ReadBlockFromDisk(CBlock &block, const CDiskBlockPos &pos,
                        const Config &config) {
     block.SetNull();
 
     // Open history file to read
     CAutoFile filein(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION);
     if (filein.IsNull()) {
         return error("ReadBlockFromDisk: OpenBlockFile failed for %s",
                      pos.ToString());
     }
 
     // Read block
     try {
         filein >> block;
     } catch (const std::exception &e) {
         return error("%s: Deserialize or I/O error - %s at %s", __func__,
                      e.what(), pos.ToString());
     }
 
     // Check the header
     if (!CheckProofOfWork(block.GetHash(), block.nBits, config)) {
         return error("ReadBlockFromDisk: Errors in block header at %s",
                      pos.ToString());
     }
 
     return true;
 }
 
 bool ReadBlockFromDisk(CBlock &block, const CBlockIndex *pindex,
                        const Config &config) {
     if (!ReadBlockFromDisk(block, pindex->GetBlockPos(), config)) {
         return false;
     }
 
     if (block.GetHash() != pindex->GetBlockHash()) {
         return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() "
                      "doesn't match index for %s at %s",
                      pindex->ToString(), pindex->GetBlockPos().ToString());
     }
 
     return true;
 }
 
 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams) {
     int halvings = nHeight / consensusParams.nSubsidyHalvingInterval;
     // Force block reward to zero when right shift is undefined.
     if (halvings >= 64) return Amount(0);
 
     Amount nSubsidy = 50 * COIN;
     // Subsidy is cut in half every 210,000 blocks which will occur
     // approximately every 4 years.
     return Amount(nSubsidy.GetSatoshis() >> halvings);
 }
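 
 // Illustrative arithmetic (assuming the mainnet halving interval of 210,000
 // blocks): at height 420,000 halvings == 2, so the subsidy is
 // (50 * COIN) >> 2 = 12.5 * COIN = 1,250,000,000 satoshis.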
 
 bool IsInitialBlockDownload() {
     const CChainParams &chainParams = Params();
 
     // Once this function has returned false, it must remain false.
     static std::atomic<bool> latchToFalse{false};
     // Optimization: pre-test latch before taking the lock.
     if (latchToFalse.load(std::memory_order_relaxed)) return false;
 
     LOCK(cs_main);
     if (latchToFalse.load(std::memory_order_relaxed)) return false;
     if (fImporting || fReindex) return true;
     if (chainActive.Tip() == nullptr) return true;
     if (chainActive.Tip()->nChainWork <
         UintToArith256(chainParams.GetConsensus().nMinimumChainWork))
         return true;
     if (chainActive.Tip()->GetBlockTime() < (GetTime() - nMaxTipAge))
         return true;
     LogPrintf("Leaving InitialBlockDownload (latching to false)\n");
     latchToFalse.store(true, std::memory_order_relaxed);
     return false;
 }
 
 CBlockIndex *pindexBestForkTip = nullptr, *pindexBestForkBase = nullptr;
 
 static void AlertNotify(const std::string &strMessage) {
     uiInterface.NotifyAlertChanged();
     std::string strCmd = GetArg("-alertnotify", "");
     if (strCmd.empty()) return;
 
     // Alert text should be plain ascii coming from a trusted source, but to be
     // safe we first strip anything not in safeChars, then add single quotes
     // around the whole string before passing it to the shell:
     std::string singleQuote("'");
     std::string safeStatus = SanitizeString(strMessage);
     safeStatus = singleQuote + safeStatus + singleQuote;
     boost::replace_all(strCmd, "%s", safeStatus);
 
     boost::thread t(runCommand, strCmd); // thread runs free
 }
 
 void CheckForkWarningConditions() {
     AssertLockHeld(cs_main);
     // Before we get past initial download, we cannot reliably alert about forks
     // (we assume we don't get stuck on a fork before finishing our initial
     // sync)
     if (IsInitialBlockDownload()) return;
 
     // If our best fork is no longer within 72 blocks (+/- 12 hours if no one
     // mines it) of our head, drop it
     if (pindexBestForkTip &&
         chainActive.Height() - pindexBestForkTip->nHeight >= 72)
         pindexBestForkTip = nullptr;
 
     if (pindexBestForkTip ||
         (pindexBestInvalid &&
          pindexBestInvalid->nChainWork >
              chainActive.Tip()->nChainWork +
                  (GetBlockProof(*chainActive.Tip()) * 6))) {
         if (!GetfLargeWorkForkFound() && pindexBestForkBase) {
             std::string warning =
                 std::string("'Warning: Large-work fork detected, forking after "
                             "block ") +
                 pindexBestForkBase->phashBlock->ToString() + std::string("'");
             AlertNotify(warning);
         }
         if (pindexBestForkTip && pindexBestForkBase) {
             LogPrintf("%s: Warning: Large valid fork found\n  forking the "
                       "chain at height %d (%s)\n  lasting to height %d "
                       "(%s).\nChain state database corruption likely.\n",
                       __func__, pindexBestForkBase->nHeight,
                       pindexBestForkBase->phashBlock->ToString(),
                       pindexBestForkTip->nHeight,
                       pindexBestForkTip->phashBlock->ToString());
             SetfLargeWorkForkFound(true);
         } else {
             LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks "
                       "longer than our best chain.\nChain state database "
                       "corruption likely.\n",
                       __func__);
             SetfLargeWorkInvalidChainFound(true);
         }
     } else {
         SetfLargeWorkForkFound(false);
         SetfLargeWorkInvalidChainFound(false);
     }
 }
 
 void CheckForkWarningConditionsOnNewFork(CBlockIndex *pindexNewForkTip) {
     AssertLockHeld(cs_main);
     // If we are on a fork that is sufficiently large, set a warning flag
     CBlockIndex *pfork = pindexNewForkTip;
     CBlockIndex *plonger = chainActive.Tip();
     while (pfork && pfork != plonger) {
         while (plonger && plonger->nHeight > pfork->nHeight)
             plonger = plonger->pprev;
         if (pfork == plonger) break;
         pfork = pfork->pprev;
     }
 
     // We define the condition that we should warn the user about as a fork of at
     // least 7 blocks with a tip within 72 blocks (+/- 12 hours if no one mines
     // it) of ours. We use 7 blocks rather arbitrarily as it represents just
     // under 10% of sustained network hash rate operating on the fork, or a
     // chain that is entirely longer than ours and invalid (note that this
     // should be detected by both). We define it this way because it allows us
     // to only store the highest fork tip (+ base) which meets the 7-block
     // condition and from this always have the most-likely-to-cause-warning fork
     if (pfork && (!pindexBestForkTip ||
                   (pindexBestForkTip &&
                    pindexNewForkTip->nHeight > pindexBestForkTip->nHeight)) &&
         pindexNewForkTip->nChainWork - pfork->nChainWork >
             (GetBlockProof(*pfork) * 7) &&
         chainActive.Height() - pindexNewForkTip->nHeight < 72) {
         pindexBestForkTip = pindexNewForkTip;
         pindexBestForkBase = pfork;
     }
 
     CheckForkWarningConditions();
 }
 
 static void InvalidChainFound(CBlockIndex *pindexNew) {
     if (!pindexBestInvalid ||
         pindexNew->nChainWork > pindexBestInvalid->nChainWork)
         pindexBestInvalid = pindexNew;
 
     LogPrintf(
         "%s: invalid block=%s  height=%d  log2_work=%.8g  date=%s\n", __func__,
         pindexNew->GetBlockHash().ToString(), pindexNew->nHeight,
         log(pindexNew->nChainWork.getdouble()) / log(2.0),
         DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexNew->GetBlockTime()));
     CBlockIndex *tip = chainActive.Tip();
     assert(tip);
     LogPrintf("%s:  current best=%s  height=%d  log2_work=%.8g  date=%s\n",
               __func__, tip->GetBlockHash().ToString(), chainActive.Height(),
               log(tip->nChainWork.getdouble()) / log(2.0),
               DateTimeStrFormat("%Y-%m-%d %H:%M:%S", tip->GetBlockTime()));
     CheckForkWarningConditions();
 }
 
 static void InvalidBlockFound(CBlockIndex *pindex,
                               const CValidationState &state) {
     if (!state.CorruptionPossible()) {
         pindex->nStatus |= BLOCK_FAILED_VALID;
         setDirtyBlockIndex.insert(pindex);
         setBlockIndexCandidates.erase(pindex);
         InvalidChainFound(pindex);
     }
 }
 
 void UpdateCoins(const CTransaction &tx, CCoinsViewCache &inputs,
                  CTxUndo &txundo, int nHeight) {
     // Mark inputs spent.
     if (!tx.IsCoinBase()) {
         txundo.vprevout.reserve(tx.vin.size());
         for (const CTxIn &txin : tx.vin) {
             txundo.vprevout.emplace_back();
             bool is_spent =
                 inputs.SpendCoin(txin.prevout, &txundo.vprevout.back());
             assert(is_spent);
         }
     }
 
     // Add outputs.
     AddCoins(inputs, tx, nHeight);
 }
 
 void UpdateCoins(const CTransaction &tx, CCoinsViewCache &inputs, int nHeight) {
     CTxUndo txundo;
     UpdateCoins(tx, inputs, txundo, nHeight);
 }
 
 bool CScriptCheck::operator()() {
     const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
     return VerifyScript(scriptSig, scriptPubKey, nFlags,
                         CachingTransactionSignatureChecker(ptxTo, nIn, amount,
                                                            cacheStore, txdata),
                         &error);
 }
 
 int GetSpendHeight(const CCoinsViewCache &inputs) {
     LOCK(cs_main);
     CBlockIndex *pindexPrev = mapBlockIndex.find(inputs.GetBestBlock())->second;
     return pindexPrev->nHeight + 1;
 }
 
 namespace Consensus {
 bool CheckTxInputs(const CTransaction &tx, CValidationState &state,
                    const CCoinsViewCache &inputs, int nSpendHeight) {
     // This doesn't trigger the DoS code on purpose; if it did, it would make it
     // easier for an attacker to attempt to split the network.
     if (!inputs.HaveInputs(tx)) {
         return state.Invalid(false, 0, "", "Inputs unavailable");
     }
 
     Amount nValueIn(0);
     Amount nFees(0);
     for (size_t i = 0; i < tx.vin.size(); i++) {
         const COutPoint &prevout = tx.vin[i].prevout;
         const Coin &coin = inputs.AccessCoin(prevout);
         assert(!coin.IsSpent());
 
         // If prev is coinbase, check that it's matured
         if (coin.IsCoinBase()) {
             if (nSpendHeight - coin.GetHeight() < COINBASE_MATURITY) {
                 return state.Invalid(
                     false, REJECT_INVALID,
                     "bad-txns-premature-spend-of-coinbase",
                     strprintf("tried to spend coinbase at depth %d",
                               nSpendHeight - coin.GetHeight()));
             }
         }
 
         // Check for negative or overflow input values
         nValueIn += coin.GetTxOut().nValue;
         if (!MoneyRange(coin.GetTxOut().nValue) || !MoneyRange(nValueIn)) {
             return state.DoS(100, false, REJECT_INVALID,
                              "bad-txns-inputvalues-outofrange");
         }
     }
 
     if (nValueIn < tx.GetValueOut()) {
         return state.DoS(100, false, REJECT_INVALID, "bad-txns-in-belowout",
                          false, strprintf("value in (%s) < value out (%s)",
                                           FormatMoney(nValueIn),
                                           FormatMoney(tx.GetValueOut())));
     }
 
     // Tally transaction fees
     Amount nTxFee = nValueIn - tx.GetValueOut();
     if (nTxFee < Amount(0)) {
         return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-negative");
     }
     nFees += nTxFee;
     if (!MoneyRange(nFees)) {
         return state.DoS(100, false, REJECT_INVALID, "bad-txns-fee-outofrange");
     }
 
     return true;
 }
 } // namespace Consensus
 
 bool CheckInputs(const CTransaction &tx, CValidationState &state,
                  const CCoinsViewCache &inputs, bool fScriptChecks,
                  uint32_t flags, bool sigCacheStore, bool scriptCacheStore,
                  const PrecomputedTransactionData &txdata,
                  std::vector<CScriptCheck> *pvChecks) {
     assert(!tx.IsCoinBase());
 
     if (!Consensus::CheckTxInputs(tx, state, inputs, GetSpendHeight(inputs))) {
         return false;
     }
 
     if (pvChecks) {
         pvChecks->reserve(tx.vin.size());
     }
 
     // The first loop above does all the inexpensive checks. Only if ALL inputs
     // pass do we perform expensive ECDSA signature checks. Helps prevent CPU
     // exhaustion attacks.
 
     // Skip script verification when connecting blocks under the assumedvalid
     // block. Assuming the assumedvalid block is valid, this is safe because
     // block merkle hashes are still computed and checked. Of course, if an
     // assumed valid block is invalid due to false scriptSigs, this
     // optimization would allow an invalid chain to be accepted.
     if (!fScriptChecks) {
         return true;
     }
 
     // First check if script executions have been cached with the same flags.
     // Note that this assumes that the inputs provided are correct (ie that the
     // transaction hash which is in tx's prevouts properly commits to the
     // scriptPubKey in the inputs view of that transaction).
     uint256 hashCacheEntry = GetScriptCacheKey(tx, flags);
     if (IsKeyInScriptCache(hashCacheEntry, !scriptCacheStore)) {
         return true;
     }
 
     for (size_t i = 0; i < tx.vin.size(); i++) {
         const COutPoint &prevout = tx.vin[i].prevout;
         const Coin &coin = inputs.AccessCoin(prevout);
         assert(!coin.IsSpent());
 
         // We very carefully only pass in things to CScriptCheck which are
         // clearly committed to by the tx's witness hash. This provides a sanity
         // check that our caching is not introducing consensus failures through
         // additional data in, eg, the coins being spent being checked as a part
         // of CScriptCheck.
         const CScript &scriptPubKey = coin.GetTxOut().scriptPubKey;
         const Amount amount = coin.GetTxOut().nValue;
 
         // Verify signature
         CScriptCheck check(scriptPubKey, amount, tx, i, flags, sigCacheStore,
                            txdata);
         if (pvChecks) {
             pvChecks->push_back(std::move(check));
         } else if (!check()) {
             if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
                 // Check whether the failure was caused by a non-mandatory
                 // script verification check, such as non-standard DER encodings
                 // or non-null dummy arguments; if so, don't trigger DoS
                 // protection to avoid splitting the network between upgraded
                 // and non-upgraded nodes.
                 CScriptCheck check2(scriptPubKey, amount, tx, i,
                                     flags &
                                         ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS,
                                     sigCacheStore, txdata);
                 if (check2()) {
                     return state.Invalid(
                         false, REJECT_NONSTANDARD,
                         strprintf("non-mandatory-script-verify-flag (%s)",
                                   ScriptErrorString(check.GetScriptError())));
                 }
             }
 
             // Failures of other flags indicate a transaction that is invalid
             // in new blocks, e.g. an invalid P2SH. We DoS ban such nodes as
             // they are not following the protocol. That said, during an
             // upgrade, careful thought should be taken as to the correct
             // behavior - we may want to continue peering with non-upgraded
             // nodes even after soft-fork super-majority signaling has occurred.
             return state.DoS(
                 100, false, REJECT_INVALID,
                 strprintf("mandatory-script-verify-flag-failed (%s)",
                           ScriptErrorString(check.GetScriptError())));
         }
     }
 
     if (scriptCacheStore && !pvChecks) {
         // We executed all of the provided scripts, and were told to cache the
         // result. Do so now.
         AddKeyInScriptCache(hashCacheEntry);
     }
 
     return true;
 }
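
 // Example (rough sketch): with a non-null pvChecks, CheckInputs() only queues
 // the expensive signature checks, and the caller runs them through the
 // parallel check queue, as ConnectBlock() does below. tx, state, view and
 // flags stand for whatever the caller has in scope.
 //
 //     CCheckQueueControl<CScriptCheck> control(&scriptcheckqueue);
 //     std::vector<CScriptCheck> vChecks;
 //     PrecomputedTransactionData txdata(tx);
 //     if (!CheckInputs(tx, state, view, true /* fScriptChecks */, flags,
 //                      false /* sigCacheStore */, false /* scriptCacheStore */,
 //                      txdata, &vChecks)) {
 //         return false; // the cheap checks failed
 //     }
 //     control.Add(vChecks);             // hand script checks to worker threads
 //     bool fScriptsOk = control.Wait(); // true iff every script verified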
 
 namespace {
 
 bool UndoWriteToDisk(const CBlockUndo &blockundo, CDiskBlockPos &pos,
                      const uint256 &hashBlock,
                      const CMessageHeader::MessageMagic &messageStart) {
     // Open history file to append
     CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
     if (fileout.IsNull()) return error("%s: OpenUndoFile failed", __func__);
 
     // Write index header
     unsigned int nSize = GetSerializeSize(fileout, blockundo);
     fileout << FLATDATA(messageStart) << nSize;
 
     // Write undo data
     long fileOutPos = ftell(fileout.Get());
     if (fileOutPos < 0) return error("%s: ftell failed", __func__);
     pos.nPos = (unsigned int)fileOutPos;
     fileout << blockundo;
 
     // calculate & write checksum
     CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
     hasher << hashBlock;
     hasher << blockundo;
     fileout << hasher.GetHash();
 
     return true;
 }
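
 // Layout of each undo record as written above and read back below (sketch):
 //
 //     [ 4 bytes] network message start (messageStart)
 //     [ 4 bytes] nSize, the serialized size of the undo data
 //     [ nSize  ] serialized CBlockUndo           <- pos.nPos points here
 //     [32 bytes] checksum = double-SHA256(hashBlock || serialized CBlockUndo)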
 
 bool UndoReadFromDisk(CBlockUndo &blockundo, const CDiskBlockPos &pos,
                       const uint256 &hashBlock) {
     // Open history file to read
     CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
     if (filein.IsNull()) {
         return error("%s: OpenUndoFile failed", __func__);
     }
 
     // Read block
     uint256 hashChecksum;
     // We need a CHashVerifier as reserializing may lose data
     CHashVerifier<CAutoFile> verifier(&filein);
     try {
         verifier << hashBlock;
         verifier >> blockundo;
         filein >> hashChecksum;
     } catch (const std::exception &e) {
         return error("%s: Deserialize or I/O error - %s", __func__, e.what());
     }
 
     // Verify checksum
     if (hashChecksum != verifier.GetHash()) {
         return error("%s: Checksum mismatch", __func__);
     }
 
     return true;
 }
 
 /** Abort with a message */
 bool AbortNode(const std::string &strMessage,
                const std::string &userMessage = "") {
     SetMiscWarning(strMessage);
     LogPrintf("*** %s\n", strMessage);
     uiInterface.ThreadSafeMessageBox(
         userMessage.empty() ? _("Error: A fatal internal error occurred, see "
                                 "debug.log for details")
                             : userMessage,
         "", CClientUIInterface::MSG_ERROR);
     StartShutdown();
     return false;
 }
 
 bool AbortNode(CValidationState &state, const std::string &strMessage,
                const std::string &userMessage = "") {
     AbortNode(strMessage, userMessage);
     return state.Error(strMessage);
 }
 
 } // namespace
 
 /** Restore the UTXO in a Coin at a given COutPoint. */
 DisconnectResult UndoCoinSpend(const Coin &undo, CCoinsViewCache &view,
                                const COutPoint &out) {
     bool fClean = true;
 
     if (view.HaveCoin(out)) {
         // Overwriting transaction output.
         fClean = false;
     }
 
     if (undo.GetHeight() == 0) {
         // Missing undo metadata (height and coinbase). Older versions included
         // this information only in undo records for the last spend of a
         // transaction's outputs. This implies that it must be present for some
         // other output of the same tx.
         const Coin &alternate = AccessByTxid(view, out.hash);
         if (alternate.IsSpent()) {
             // Adding output for transaction without known metadata
             return DISCONNECT_FAILED;
         }
 
         // This is somewhat ugly, but hopefully the utility is limited. This is
         // only useful when working from legacy on-disk data. In any case,
         // putting the correct information in there doesn't hurt.
         const_cast<Coin &>(undo) = Coin(undo.GetTxOut(), alternate.GetHeight(),
                                         alternate.IsCoinBase());
     }
 
     view.AddCoin(out, undo, undo.IsCoinBase());
     return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
 }
 
 /**
  * Undo the effects of this block (with given index) on the UTXO set represented
  * by coins. When UNCLEAN or FAILED is returned, view is left in an
  * indeterminate state.
  */
 static DisconnectResult DisconnectBlock(const CBlock &block,
                                         const CBlockIndex *pindex,
                                         CCoinsViewCache &view) {
     assert(pindex->GetBlockHash() == view.GetBestBlock());
 
     CBlockUndo blockUndo;
     CDiskBlockPos pos = pindex->GetUndoPos();
     if (pos.IsNull()) {
         error("DisconnectBlock(): no undo data available");
         return DISCONNECT_FAILED;
     }
 
     if (!UndoReadFromDisk(blockUndo, pos, pindex->pprev->GetBlockHash())) {
         error("DisconnectBlock(): failure reading undo data");
         return DISCONNECT_FAILED;
     }
 
     return ApplyBlockUndo(blockUndo, block, pindex, view);
 }
 
 DisconnectResult ApplyBlockUndo(const CBlockUndo &blockUndo,
                                 const CBlock &block, const CBlockIndex *pindex,
                                 CCoinsViewCache &view) {
     bool fClean = true;
 
     if (blockUndo.vtxundo.size() + 1 != block.vtx.size()) {
         error("DisconnectBlock(): block and undo data inconsistent");
         return DISCONNECT_FAILED;
     }
 
     // Undo transactions in reverse order.
     size_t i = block.vtx.size();
     while (i-- > 0) {
         const CTransaction &tx = *(block.vtx[i]);
         uint256 txid = tx.GetId();
 
         // Check that all outputs are available and match the outputs in the
         // block itself exactly.
         for (size_t o = 0; o < tx.vout.size(); o++) {
             if (tx.vout[o].scriptPubKey.IsUnspendable()) {
                 continue;
             }
 
             COutPoint out(txid, o);
             Coin coin;
             bool is_spent = view.SpendCoin(out, &coin);
             if (!is_spent || tx.vout[o] != coin.GetTxOut()) {
                 // transaction output mismatch
                 fClean = false;
             }
         }
 
         // Restore inputs.
         if (i < 1) {
             // Skip the coinbase.
             continue;
         }
 
         const CTxUndo &txundo = blockUndo.vtxundo[i - 1];
         if (txundo.vprevout.size() != tx.vin.size()) {
             error("DisconnectBlock(): transaction and undo data inconsistent");
             return DISCONNECT_FAILED;
         }
 
         for (size_t j = tx.vin.size(); j-- > 0;) {
             const COutPoint &out = tx.vin[j].prevout;
             const Coin &undo = txundo.vprevout[j];
             DisconnectResult res = UndoCoinSpend(undo, view, out);
             if (res == DISCONNECT_FAILED) {
                 return DISCONNECT_FAILED;
             }
             fClean = fClean && res != DISCONNECT_UNCLEAN;
         }
     }
 
     // Move best block pointer to previous block.
     view.SetBestBlock(block.hashPrevBlock);
 
     return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
 }
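
 // Shape of the data walked above (sketch): the coinbase (vtx[0]) has no undo
 // entry, so blockUndo.vtxundo[i - 1] pairs with block.vtx[i], and
 // vtxundo[i - 1].vprevout[j] restores the coin spent by vtx[i]->vin[j]:
 //
 //     block.vtx:          [ coinbase, tx1, tx2, ..., txN ]
 //     blockUndo.vtxundo:  [           u1,  u2, ...,  uN ]
 //
 // Both loops run in reverse so coins are restored in the opposite order to the
 // one in which ConnectBlock() spent them.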
 
 static void FlushBlockFile(bool fFinalize = false) {
     LOCK(cs_LastBlockFile);
 
     CDiskBlockPos posOld(nLastBlockFile, 0);
 
     FILE *fileOld = OpenBlockFile(posOld);
     if (fileOld) {
         if (fFinalize)
             TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nSize);
         FileCommit(fileOld);
         fclose(fileOld);
     }
 
     fileOld = OpenUndoFile(posOld);
     if (fileOld) {
         if (fFinalize)
             TruncateFile(fileOld, vinfoBlockFile[nLastBlockFile].nUndoSize);
         FileCommit(fileOld);
         fclose(fileOld);
     }
 }
 
 bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos,
                  unsigned int nAddSize);
 
 static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
 
 void ThreadScriptCheck() {
     RenameThread("bitcoin-scriptch");
     scriptcheckqueue.Thread();
 }
 
 // Protected by cs_main
 VersionBitsCache versionbitscache;
 
 int32_t ComputeBlockVersion(const CBlockIndex *pindexPrev,
                             const Consensus::Params &params) {
     LOCK(cs_main);
     int32_t nVersion = VERSIONBITS_TOP_BITS;
 
     for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
         ThresholdState state = VersionBitsState(
             pindexPrev, params, (Consensus::DeploymentPos)i, versionbitscache);
         if (state == THRESHOLD_LOCKED_IN || state == THRESHOLD_STARTED) {
             nVersion |= VersionBitsMask(params, (Consensus::DeploymentPos)i);
         }
     }
 
     return nVersion;
 }
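
 // Example: VERSIONBITS_TOP_BITS is 0x20000000, and VersionBitsMask() for a
 // deployment on bit b is (1 << b). With a single deployment STARTED on bit 0,
 // ComputeBlockVersion() returns 0x20000001; with nothing STARTED or LOCKED_IN
 // it returns plain 0x20000000.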
 
 /**
  * Threshold condition checker that triggers when unknown versionbits are seen
  * on the network.
  */
 class WarningBitsConditionChecker : public AbstractThresholdConditionChecker {
 private:
     int bit;
 
 public:
     WarningBitsConditionChecker(int bitIn) : bit(bitIn) {}
 
     int64_t BeginTime(const Consensus::Params &params) const override {
         return 0;
     }
     int64_t EndTime(const Consensus::Params &params) const override {
         return std::numeric_limits<int64_t>::max();
     }
     int Period(const Consensus::Params &params) const override {
         return params.nMinerConfirmationWindow;
     }
     int Threshold(const Consensus::Params &params) const override {
         return params.nRuleChangeActivationThreshold;
     }
 
     bool Condition(const CBlockIndex *pindex,
                    const Consensus::Params &params) const override {
         return ((pindex->nVersion & VERSIONBITS_TOP_MASK) ==
                 VERSIONBITS_TOP_BITS) &&
                ((pindex->nVersion >> bit) & 1) != 0 &&
                ((ComputeBlockVersion(pindex->pprev, params) >> bit) & 1) == 0;
     }
 };
 
 // Protected by cs_main
 static ThresholdConditionCache warningcache[VERSIONBITS_NUM_BITS];
 
 // Returns the script flags which should be checked for a given block
 static uint32_t GetBlockScriptFlags(const CBlockIndex *pindex,
                                     const Config &config) {
     AssertLockHeld(cs_main);
     const Consensus::Params &consensusparams =
         config.GetChainParams().GetConsensus();
 
     // BIP16 didn't become active until Apr 1 2012
     int64_t nBIP16SwitchTime = 1333238400;
     bool fStrictPayToScriptHash = (pindex->GetBlockTime() >= nBIP16SwitchTime);
 
     uint32_t flags =
         fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE;
 
     // Start enforcing the DERSIG (BIP66) rule
     if (pindex->nHeight >= consensusparams.BIP66Height) {
         flags |= SCRIPT_VERIFY_DERSIG;
     }
 
     // Start enforcing CHECKLOCKTIMEVERIFY (BIP65) rule
     if (pindex->nHeight >= consensusparams.BIP65Height) {
         flags |= SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY;
     }
 
     // Start enforcing BIP112 (CHECKSEQUENCEVERIFY) using versionbits logic.
     if (VersionBitsState(pindex->pprev, consensusparams,
                          Consensus::DEPLOYMENT_CSV,
                          versionbitscache) == THRESHOLD_ACTIVE) {
         flags |= SCRIPT_VERIFY_CHECKSEQUENCEVERIFY;
     }
 
     // If the UAHF is enabled, we start accepting replay protected txns
     if (IsUAHFenabled(config, pindex->pprev)) {
         flags |= SCRIPT_VERIFY_STRICTENC;
         flags |= SCRIPT_ENABLE_SIGHASH_FORKID;
     }
 
     // If the DAA HF is enabled, we start rejecting transactions that use a
     // high S value in their signatures. We also make sure that signatures that
     // are supposed to fail (for instance in multisig or other forms of smart
     // contracts) are null.
     if (IsDAAEnabled(config, pindex->pprev)) {
         flags |= SCRIPT_VERIFY_LOW_S;
         flags |= SCRIPT_VERIFY_NULLFAIL;
     }
 
     return flags;
 }
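
 // Example: on the main chain, for a block whose parent is past the BIP65/BIP66
 // heights and has CSV, the UAHF and the DAA HF all active, the flags returned
 // above work out to SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG |
 // SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY | SCRIPT_VERIFY_CHECKSEQUENCEVERIFY |
 // SCRIPT_VERIFY_STRICTENC | SCRIPT_ENABLE_SIGHASH_FORKID |
 // SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_NULLFAIL.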
 
 static int64_t nTimeCheck = 0;
 static int64_t nTimeForks = 0;
 static int64_t nTimeVerify = 0;
 static int64_t nTimeConnect = 0;
 static int64_t nTimeIndex = 0;
 static int64_t nTimeCallbacks = 0;
 static int64_t nTimeTotal = 0;
 
 /**
  * Apply the effects of this block (with given index) on the UTXO set
  * represented by coins. Validity checks that depend on the UTXO set are also
  * done; ConnectBlock() can fail if those validity checks fail (among other
  * reasons).
  */
 static bool ConnectBlock(const Config &config, const CBlock &block,
                          CValidationState &state, CBlockIndex *pindex,
                          CCoinsViewCache &view, const CChainParams &chainparams,
                          bool fJustCheck = false) {
     AssertLockHeld(cs_main);
 
     int64_t nTimeStart = GetTimeMicros();
 
     // Check it again in case a previous version let a bad block in
     if (!CheckBlock(config, block, state, !fJustCheck, !fJustCheck)) {
         return error("%s: Consensus::CheckBlock: %s", __func__,
                      FormatStateMessage(state));
     }
 
     // Verify that the view's current state corresponds to the previous block
     uint256 hashPrevBlock =
         pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash();
     assert(hashPrevBlock == view.GetBestBlock());
 
     // Special case for the genesis block, skipping connection of its
     // transactions (its coinbase is unspendable)
     if (block.GetHash() == chainparams.GetConsensus().hashGenesisBlock) {
         if (!fJustCheck) {
             view.SetBestBlock(pindex->GetBlockHash());
         }
 
         return true;
     }
 
     bool fScriptChecks = true;
     if (!hashAssumeValid.IsNull()) {
         // We've been configured with the hash of a block which has been
         // externally verified to have a valid history. A suitable default value
         // is included with the software and updated from time to time. Because
         // validity relative to a piece of software is an objective fact these
         // defaults can be easily reviewed. This setting doesn't force the
         // selection of any particular chain but makes validating some faster by
         // effectively caching the result of part of the verification.
         BlockMap::const_iterator it = mapBlockIndex.find(hashAssumeValid);
         if (it != mapBlockIndex.end()) {
             if (it->second->GetAncestor(pindex->nHeight) == pindex &&
                 pindexBestHeader->GetAncestor(pindex->nHeight) == pindex &&
                 pindexBestHeader->nChainWork >=
                     UintToArith256(
                         chainparams.GetConsensus().nMinimumChainWork)) {
                 // This block is a member of the assumed verified chain and an
                 // ancestor of the best header. The equivalent time check
                 // discourages hash power from extorting the network, via a DoS
                 // attack, into accepting an invalid block by telling users they
                 // must manually set assumevalid. Requiring a software change or
                 // burying the invalid block, regardless of the setting, makes
                 // it hard to hide the implication of the demand.
                 // This also avoids having release candidates that are hardly
                 // doing any signature verification at all in testing without
                 // having to artificially set the default assumed verified block
                 // further back. The test against nMinimumChainWork prevents the
                 // skipping when denied access to any chain at least as good as
                 // the expected chain.
                 fScriptChecks =
                     (GetBlockProofEquivalentTime(
                          *pindexBestHeader, *pindex, *pindexBestHeader,
                          chainparams.GetConsensus()) <= 60 * 60 * 24 * 7 * 2);
             }
         }
     }
 
     int64_t nTime1 = GetTimeMicros();
     nTimeCheck += nTime1 - nTimeStart;
     LogPrint(BCLog::BENCH, "    - Sanity checks: %.2fms [%.2fs]\n",
              0.001 * (nTime1 - nTimeStart), nTimeCheck * 0.000001);
 
     // Do not allow blocks that contain transactions which 'overwrite' older
     // transactions, unless those are already completely spent. If such
     // overwrites are allowed, coinbases and transactions depending upon those
     // can be duplicated to remove the ability to spend the first instance --
     // even after being sent to another address. See BIP30 and
     // http://r6.ca/blog/20120206T005236Z.html for more information. This logic
     // is not necessary for memory pool transactions, as AcceptToMemoryPool
     // already refuses previously-known transaction ids entirely. This rule was
     // originally applied to all blocks with a timestamp after March 15, 2012,
     // 0:00 UTC. Now that the whole chain is irreversibly beyond that time it is
     // applied to all blocks except the two in the chain that violate it. This
     // prevents exploiting the issue against nodes during their initial block
     // download.
     bool fEnforceBIP30 = (!pindex->phashBlock) || // Enforce on CreateNewBlock
                                                   // invocations which don't
                                                   // have a hash.
                          !((pindex->nHeight == 91842 &&
                             pindex->GetBlockHash() ==
                                 uint256S("0x00000000000a4d0a398161ffc163c503763"
                                          "b1f4360639393e0e4c8e300e0caec")) ||
                            (pindex->nHeight == 91880 &&
                             pindex->GetBlockHash() ==
                                 uint256S("0x00000000000743f190a18c5577a3c2d2a1f"
                                          "610ae9601ac046a38084ccb7cd721")));
 
     // Once BIP34 activated it was not possible to create new duplicate
     // coinbases and thus other than starting with the 2 existing duplicate
     // coinbase pairs, not possible to create overwriting txs. But by the time
     // BIP34 activated, in each of the existing pairs the duplicate coinbase had
     // overwritten the first before the first had been spent. Since those
     // coinbases are sufficiently buried it's no longer possible to create
     // further duplicate transactions descending from the known pairs either. If
     // we're on the known chain at height greater than where BIP34 activated, we
     // can save the db accesses needed for the BIP30 check.
     CBlockIndex *pindexBIP34height =
         pindex->pprev->GetAncestor(chainparams.GetConsensus().BIP34Height);
     // Only continue to enforce if we're below BIP34 activation height or the
     // block hash at that height doesn't correspond.
     fEnforceBIP30 = fEnforceBIP30 && (!pindexBIP34height ||
                                       !(pindexBIP34height->GetBlockHash() ==
                                         chainparams.GetConsensus().BIP34Hash));
 
     if (fEnforceBIP30) {
         for (const auto &tx : block.vtx) {
             for (size_t o = 0; o < tx->vout.size(); o++) {
                 if (view.HaveCoin(COutPoint(tx->GetHash(), o))) {
                     return state.DoS(
                         100,
                         error("ConnectBlock(): tried to overwrite transaction"),
                         REJECT_INVALID, "bad-txns-BIP30");
                 }
             }
         }
     }
 
     // Start enforcing BIP68 (sequence locks) using versionbits logic.
     int nLockTimeFlags = 0;
     if (VersionBitsState(pindex->pprev, chainparams.GetConsensus(),
                          Consensus::DEPLOYMENT_CSV,
                          versionbitscache) == THRESHOLD_ACTIVE) {
         nLockTimeFlags |= LOCKTIME_VERIFY_SEQUENCE;
     }
 
     uint32_t flags = GetBlockScriptFlags(pindex, config);
 
     int64_t nTime2 = GetTimeMicros();
     nTimeForks += nTime2 - nTime1;
     LogPrint(BCLog::BENCH, "    - Fork checks: %.2fms [%.2fs]\n",
              0.001 * (nTime2 - nTime1), nTimeForks * 0.000001);
 
     CBlockUndo blockundo;
 
     CCheckQueueControl<CScriptCheck> control(fScriptChecks ? &scriptcheckqueue
                                                            : nullptr);
 
     std::vector<int> prevheights;
     Amount nFees(0);
     int nInputs = 0;
 
     // Sigops counting. We need to do it again because of P2SH.
     uint64_t nSigOpsCount = 0;
     const uint64_t currentBlockSize =
         ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
     const uint64_t nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize);
 
     CDiskTxPos pos(pindex->GetBlockPos(),
                    GetSizeOfCompactSize(block.vtx.size()));
     std::vector<std::pair<uint256, CDiskTxPos>> vPos;
     vPos.reserve(block.vtx.size());
     blockundo.vtxundo.reserve(block.vtx.size() - 1);
 
     for (size_t i = 0; i < block.vtx.size(); i++) {
         const CTransaction &tx = *(block.vtx[i]);
 
         nInputs += tx.vin.size();
 
         if (!tx.IsCoinBase()) {
             if (!view.HaveInputs(tx)) {
                 return state.DoS(
                     100, error("ConnectBlock(): inputs missing/spent"),
                     REJECT_INVALID, "bad-txns-inputs-missingorspent");
             }
 
             // Check that the transaction is BIP68 final. BIP68 lock checks (as
             // opposed to nLockTime checks) must be in ConnectBlock because they
             // require the UTXO set.
             prevheights.resize(tx.vin.size());
             for (size_t j = 0; j < tx.vin.size(); j++) {
                 prevheights[j] = view.AccessCoin(tx.vin[j].prevout).GetHeight();
             }
 
             if (!SequenceLocks(tx, nLockTimeFlags, &prevheights, *pindex)) {
                 return state.DoS(
                     100, error("%s: contains a non-BIP68-final transaction",
                                __func__),
                     REJECT_INVALID, "bad-txns-nonfinal");
             }
         }
 
         // GetTransactionSigOpCount counts 2 types of sigops:
         // * legacy (always)
         // * p2sh (when P2SH enabled in flags and excludes coinbase)
         auto txSigOpsCount = GetTransactionSigOpCount(tx, view, flags);
         if (txSigOpsCount > MAX_TX_SIGOPS_COUNT) {
             return state.DoS(100, false, REJECT_INVALID, "bad-txn-sigops");
         }
 
         nSigOpsCount += txSigOpsCount;
         if (nSigOpsCount > nMaxSigOpsCount) {
             return state.DoS(100, error("ConnectBlock(): too many sigops"),
                              REJECT_INVALID, "bad-blk-sigops");
         }
 
         if (!tx.IsCoinBase()) {
             Amount fee = view.GetValueIn(tx) - tx.GetValueOut();
             nFees += fee;
 
             // Don't cache results if we're actually connecting blocks (still
             // consult the cache, though).
             bool fCacheResults = fJustCheck;
 
             std::vector<CScriptCheck> vChecks;
             if (!CheckInputs(tx, state, view, fScriptChecks, flags,
                              fCacheResults, fCacheResults,
                              PrecomputedTransactionData(tx), &vChecks)) {
                 return error("ConnectBlock(): CheckInputs on %s failed with %s",
                              tx.GetId().ToString(), FormatStateMessage(state));
             }
 
             control.Add(vChecks);
         }
 
         CTxUndo undoDummy;
         if (i > 0) {
             blockundo.vtxundo.push_back(CTxUndo());
         }
         UpdateCoins(tx, view, i == 0 ? undoDummy : blockundo.vtxundo.back(),
                     pindex->nHeight);
 
         vPos.push_back(std::make_pair(tx.GetId(), pos));
         pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
     }
 
     int64_t nTime3 = GetTimeMicros();
     nTimeConnect += nTime3 - nTime2;
     LogPrint(BCLog::BENCH,
              "      - Connect %u transactions: %.2fms (%.3fms/tx, "
              "%.3fms/txin) [%.2fs]\n",
              (unsigned)block.vtx.size(), 0.001 * (nTime3 - nTime2),
              0.001 * (nTime3 - nTime2) / block.vtx.size(),
              nInputs <= 1 ? 0 : 0.001 * (nTime3 - nTime2) / (nInputs - 1),
              nTimeConnect * 0.000001);
 
     Amount blockReward =
         nFees + GetBlockSubsidy(pindex->nHeight, chainparams.GetConsensus());
     if (block.vtx[0]->GetValueOut() > blockReward) {
         return state.DoS(100, error("ConnectBlock(): coinbase pays too much "
                                     "(actual=%d vs limit=%d)",
                                     block.vtx[0]->GetValueOut(), blockReward),
                          REJECT_INVALID, "bad-cb-amount");
     }
 
     if (!control.Wait()) {
         return state.DoS(100, false, REJECT_INVALID, "blk-bad-inputs", false,
                          "parallel script check failed");
     }
 
     int64_t nTime4 = GetTimeMicros();
     nTimeVerify += nTime4 - nTime2;
     LogPrint(BCLog::BENCH,
              "    - Verify %u txins: %.2fms (%.3fms/txin) [%.2fs]\n",
              nInputs - 1, 0.001 * (nTime4 - nTime2),
              nInputs <= 1 ? 0 : 0.001 * (nTime4 - nTime2) / (nInputs - 1),
              nTimeVerify * 0.000001);
 
     if (fJustCheck) {
         return true;
     }
 
     // Write undo information to disk
     if (pindex->GetUndoPos().IsNull() ||
         !pindex->IsValid(BLOCK_VALID_SCRIPTS)) {
         if (pindex->GetUndoPos().IsNull()) {
             CDiskBlockPos _pos;
             if (!FindUndoPos(
                     state, pindex->nFile, _pos,
                     ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) +
                         40)) {
                 return error("ConnectBlock(): FindUndoPos failed");
             }
             if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(),
                                  chainparams.DiskMagic())) {
                 return AbortNode(state, "Failed to write undo data");
             }
 
             // update nUndoPos in block index
             pindex->nUndoPos = _pos.nPos;
             pindex->nStatus |= BLOCK_HAVE_UNDO;
         }
 
         pindex->RaiseValidity(BLOCK_VALID_SCRIPTS);
         setDirtyBlockIndex.insert(pindex);
     }
 
     if (fTxIndex && !pblocktree->WriteTxIndex(vPos)) {
         return AbortNode(state, "Failed to write transaction index");
     }
 
     // add this block to the view's block chain
     view.SetBestBlock(pindex->GetBlockHash());
 
     int64_t nTime5 = GetTimeMicros();
     nTimeIndex += nTime5 - nTime4;
     LogPrint(BCLog::BENCH, "    - Index writing: %.2fms [%.2fs]\n",
              0.001 * (nTime5 - nTime4), nTimeIndex * 0.000001);
 
-    // Watch for changes to the previous coinbase transaction.
-    static uint256 hashPrevBestCoinBase;
-    GetMainSignals().UpdatedTransaction(hashPrevBestCoinBase);
-    hashPrevBestCoinBase = block.vtx[0]->GetId();
-
     int64_t nTime6 = GetTimeMicros();
     nTimeCallbacks += nTime6 - nTime5;
     LogPrint(BCLog::BENCH, "    - Callbacks: %.2fms [%.2fs]\n",
              0.001 * (nTime6 - nTime5), nTimeCallbacks * 0.000001);
 
     return true;
 }
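
 // Example of the coinbase value check above: if the subsidy at pindex->nHeight
 // is 12.5 coins and the block's transactions pay 0.1 coins in fees, then
 // blockReward is 12.6 coins and the coinbase outputs may claim at most that
 // amount; claiming any more fails with "bad-cb-amount".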
 
 /**
  * Update the on-disk chain state.
  * The caches and indexes are flushed depending on the mode we're called with if
  * they're too large, if it's been a while since the last write, or always and
  * in all cases if we're in prune mode and are deleting files.
  */
 static bool FlushStateToDisk(CValidationState &state, FlushStateMode mode,
                              int nManualPruneHeight) {
     int64_t nMempoolUsage = mempool.DynamicMemoryUsage();
     const CChainParams &chainparams = Params();
     LOCK2(cs_main, cs_LastBlockFile);
     static int64_t nLastWrite = 0;
     static int64_t nLastFlush = 0;
     static int64_t nLastSetChain = 0;
     std::set<int> setFilesToPrune;
     bool fFlushForPrune = false;
     try {
         if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) &&
             !fReindex) {
             if (nManualPruneHeight > 0) {
                 FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight);
             } else {
                 FindFilesToPrune(setFilesToPrune,
                                  chainparams.PruneAfterHeight());
                 fCheckForPruning = false;
             }
             if (!setFilesToPrune.empty()) {
                 fFlushForPrune = true;
                 if (!fHavePruned) {
                     pblocktree->WriteFlag("prunedblockfiles", true);
                     fHavePruned = true;
                 }
             }
         }
         int64_t nNow = GetTimeMicros();
         // Avoid writing/flushing immediately after startup.
         if (nLastWrite == 0) {
             nLastWrite = nNow;
         }
         if (nLastFlush == 0) {
             nLastFlush = nNow;
         }
         if (nLastSetChain == 0) {
             nLastSetChain = nNow;
         }
         int64_t nMempoolSizeMax =
             GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
         int64_t cacheSize =
             pcoinsTip->DynamicMemoryUsage() * DB_PEAK_USAGE_FACTOR;
         int64_t nTotalSpace =
             nCoinCacheUsage +
             std::max<int64_t>(nMempoolSizeMax - nMempoolUsage, 0);
         // The cache is large and we're within 10% and 200 MiB or 50% and 50MiB
         // of the limit, but we have time now (not in the middle of a block
         // processing).
         bool fCacheLarge =
             mode == FLUSH_STATE_PERIODIC &&
             cacheSize >
                 std::min(std::max(nTotalSpace / 2,
                                   nTotalSpace -
                                       MIN_BLOCK_COINSDB_USAGE * 1024 * 1024),
                          std::max((9 * nTotalSpace) / 10,
                                   nTotalSpace -
                                       MAX_BLOCK_COINSDB_USAGE * 1024 * 1024));
         // The cache is over the limit, we have to write now.
         bool fCacheCritical =
             mode == FLUSH_STATE_IF_NEEDED && cacheSize > nTotalSpace;
         // It's been a while since we wrote the block index to disk. Do this
         // frequently, so we don't need to redownload after a crash.
         bool fPeriodicWrite =
             mode == FLUSH_STATE_PERIODIC &&
             nNow > nLastWrite + (int64_t)DATABASE_WRITE_INTERVAL * 1000000;
         // It's been very long since we flushed the cache. Do this infrequently,
         // to optimize cache usage.
         bool fPeriodicFlush =
             mode == FLUSH_STATE_PERIODIC &&
             nNow > nLastFlush + (int64_t)DATABASE_FLUSH_INTERVAL * 1000000;
         // Combine all conditions that result in a full cache flush.
         bool fDoFullFlush = (mode == FLUSH_STATE_ALWAYS) || fCacheLarge ||
                             fCacheCritical || fPeriodicFlush || fFlushForPrune;
         // Write blocks and block index to disk.
         if (fDoFullFlush || fPeriodicWrite) {
             // Depend on nMinDiskSpace to ensure we can write block index
             if (!CheckDiskSpace(0)) return state.Error("out of disk space");
             // First make sure all block and undo data is flushed to disk.
             FlushBlockFile();
             // Then update all block file information (which may refer to block
             // and undo files).
             {
                 std::vector<std::pair<int, const CBlockFileInfo *>> vFiles;
                 vFiles.reserve(setDirtyFileInfo.size());
                 for (std::set<int>::iterator it = setDirtyFileInfo.begin();
                      it != setDirtyFileInfo.end();) {
                     vFiles.push_back(std::make_pair(*it, &vinfoBlockFile[*it]));
                     setDirtyFileInfo.erase(it++);
                 }
                 std::vector<const CBlockIndex *> vBlocks;
                 vBlocks.reserve(setDirtyBlockIndex.size());
                 for (std::set<CBlockIndex *>::iterator it =
                          setDirtyBlockIndex.begin();
                      it != setDirtyBlockIndex.end();) {
                     vBlocks.push_back(*it);
                     setDirtyBlockIndex.erase(it++);
                 }
                 if (!pblocktree->WriteBatchSync(vFiles, nLastBlockFile,
                                                 vBlocks)) {
                     return AbortNode(state,
                                      "Failed to write to block index database");
                 }
             }
             // Finally remove any pruned files
             if (fFlushForPrune) UnlinkPrunedFiles(setFilesToPrune);
             nLastWrite = nNow;
         }
         // Flush best chain related state. This can only be done if the blocks /
         // block index write was also done.
         if (fDoFullFlush) {
             // Typical Coin structures on disk are around 48 bytes in size.
             // Pushing a new one to the database can cause it to be written
             // twice (once in the log, and once in the tables). This is already
             // an overestimation, as most will delete an existing entry or
             // overwrite one. Still, use a conservative safety factor of 2.
             if (!CheckDiskSpace(48 * 2 * 2 * pcoinsTip->GetCacheSize())) {
                 return state.Error("out of disk space");
             }
             // Flush the chainstate (which may refer to block index entries).
             if (!pcoinsTip->Flush()) {
                 return AbortNode(state, "Failed to write to coin database");
             }
             nLastFlush = nNow;
         }
         if (fDoFullFlush ||
             ((mode == FLUSH_STATE_ALWAYS || mode == FLUSH_STATE_PERIODIC) &&
              nNow >
                  nLastSetChain + (int64_t)DATABASE_WRITE_INTERVAL * 1000000)) {
             // Update best block in wallet (so we can detect restored wallets).
             GetMainSignals().SetBestChain(chainActive.GetLocator());
             nLastSetChain = nNow;
         }
     } catch (const std::runtime_error &e) {
         return AbortNode(
             state, std::string("System error while flushing: ") + e.what());
     }
     return true;
 }
 
 void FlushStateToDisk() {
     CValidationState state;
     FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
 }
 
 void PruneAndFlush() {
     CValidationState state;
     fCheckForPruning = true;
     FlushStateToDisk(state, FLUSH_STATE_NONE);
 }
 
 /** Update chainActive and related internal data structures. */
 static void UpdateTip(const Config &config, CBlockIndex *pindexNew) {
     const CChainParams &chainParams = config.GetChainParams();
 
     chainActive.SetTip(pindexNew);
 
     // New best block
     mempool.AddTransactionsUpdated(1);
 
     cvBlockChange.notify_all();
 
     static bool fWarned = false;
     std::vector<std::string> warningMessages;
     if (!IsInitialBlockDownload()) {
         int nUpgraded = 0;
         const CBlockIndex *pindex = chainActive.Tip();
         for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
             WarningBitsConditionChecker checker(bit);
             ThresholdState state = checker.GetStateFor(
                 pindex, chainParams.GetConsensus(), warningcache[bit]);
             if (state == THRESHOLD_ACTIVE || state == THRESHOLD_LOCKED_IN) {
                 if (state == THRESHOLD_ACTIVE) {
                     std::string strWarning =
                         strprintf(_("Warning: unknown new rules activated "
                                     "(versionbit %i)"),
                                   bit);
                     SetMiscWarning(strWarning);
                     if (!fWarned) {
                         AlertNotify(strWarning);
                         fWarned = true;
                     }
                 } else {
                     warningMessages.push_back(
                         strprintf("unknown new rules are about to activate "
                                   "(versionbit %i)",
                                   bit));
                 }
             }
         }
         // Check the version of the last 100 blocks to see if we need to
         // upgrade:
         for (int i = 0; i < 100 && pindex != nullptr; i++) {
             int32_t nExpectedVersion =
                 ComputeBlockVersion(pindex->pprev, chainParams.GetConsensus());
             if (pindex->nVersion > VERSIONBITS_LAST_OLD_BLOCK_VERSION &&
                 (pindex->nVersion & ~nExpectedVersion) != 0)
                 ++nUpgraded;
             pindex = pindex->pprev;
         }
         if (nUpgraded > 0)
             warningMessages.push_back(strprintf(
                 "%d of last 100 blocks have unexpected version", nUpgraded));
         if (nUpgraded > 100 / 2) {
             std::string strWarning =
                 _("Warning: Unknown block versions being mined! It's possible "
                   "unknown rules are in effect");
             // notify GetWarnings(), called by Qt and the JSON-RPC code to warn
             // the user:
             SetMiscWarning(strWarning);
             if (!fWarned) {
                 AlertNotify(strWarning);
                 fWarned = true;
             }
         }
     }
     LogPrintf(
         "%s: new best=%s height=%d version=0x%08x log2_work=%.8g tx=%lu "
         "date='%s' progress=%f cache=%.1fMiB(%utxo)",
         __func__, chainActive.Tip()->GetBlockHash().ToString(),
         chainActive.Height(), chainActive.Tip()->nVersion,
         log(chainActive.Tip()->nChainWork.getdouble()) / log(2.0),
         (unsigned long)chainActive.Tip()->nChainTx,
         DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
                           chainActive.Tip()->GetBlockTime()),
         GuessVerificationProgress(chainParams.TxData(), chainActive.Tip()),
         pcoinsTip->DynamicMemoryUsage() * (1.0 / (1 << 20)),
         pcoinsTip->GetCacheSize());
     if (!warningMessages.empty())
         LogPrintf(" warning='%s'",
                   boost::algorithm::join(warningMessages, ", "));
     LogPrintf("\n");
 }
 
 /**
  * Disconnect chainActive's tip. You probably want to call
  * mempool.removeForReorg and manually re-limit mempool size after this, with
  * cs_main held.
  */
 static bool DisconnectTip(const Config &config, CValidationState &state,
                           bool fBare = false) {
     CBlockIndex *pindexDelete = chainActive.Tip();
     assert(pindexDelete);
 
     // Read block from disk.
-    CBlock block;
+    std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
+    CBlock &block = *pblock;
     if (!ReadBlockFromDisk(block, pindexDelete, config)) {
         return AbortNode(state, "Failed to read block");
     }
 
     // Apply the block atomically to the chain state.
     int64_t nStart = GetTimeMicros();
     {
         CCoinsViewCache view(pcoinsTip);
         if (DisconnectBlock(block, pindexDelete, view) != DISCONNECT_OK) {
             return error("DisconnectTip(): DisconnectBlock %s failed",
                          pindexDelete->GetBlockHash().ToString());
         }
 
         bool flushed = view.Flush();
         assert(flushed);
     }
 
     LogPrint(BCLog::BENCH, "- Disconnect block: %.2fms\n",
              (GetTimeMicros() - nStart) * 0.001);
 
     // Write the chain state to disk, if necessary.
     if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED)) {
         return false;
     }
 
     if (!fBare) {
         // Resurrect mempool transactions from the disconnected block.
         std::vector<uint256> vHashUpdate;
         for (const auto &it : block.vtx) {
             const CTransaction &tx = *it;
             // ignore validation errors in resurrected transactions
             CValidationState stateDummy;
             if (tx.IsCoinBase() ||
                 !AcceptToMemoryPool(config, mempool, stateDummy, it, false,
                                     nullptr, nullptr, true)) {
                 mempool.removeRecursive(tx, MemPoolRemovalReason::REORG);
             } else if (mempool.exists(tx.GetId())) {
                 vHashUpdate.push_back(tx.GetId());
             }
         }
         // AcceptToMemoryPool/addUnchecked all assume that new mempool entries
         // have no in-mempool children, which is generally not true when adding
         // previously-confirmed transactions back to the mempool.
         // UpdateTransactionsFromBlock finds descendants of any transactions in
         // this block that were added back and cleans up the mempool state.
         mempool.UpdateTransactionsFromBlock(vHashUpdate);
     }
 
     // Update chainActive and related variables.
     UpdateTip(config, pindexDelete->pprev);
     // Let wallets know transactions went from 1-confirmed to
     // 0-confirmed or conflicted:
-    for (const auto &tx : block.vtx) {
-        GetMainSignals().SyncTransaction(
-            *tx, pindexDelete->pprev,
-            CMainSignals::SYNC_TRANSACTION_NOT_IN_BLOCK);
-    }
+    GetMainSignals().BlockDisconnected(pblock);
     return true;
 }
 
 static int64_t nTimeReadFromDisk = 0;
 static int64_t nTimeConnectTotal = 0;
 static int64_t nTimeFlush = 0;
 static int64_t nTimeChainState = 0;
 static int64_t nTimePostConnect = 0;
 
+struct PerBlockConnectTrace {
+    CBlockIndex *pindex = nullptr;
+    std::shared_ptr<const CBlock> pblock;
+    std::shared_ptr<std::vector<CTransactionRef>> conflictedTxs;
+    PerBlockConnectTrace()
+        : conflictedTxs(std::make_shared<std::vector<CTransactionRef>>()) {}
+};
+
 /**
  * Used to track blocks whose transactions were applied to the UTXO state as a
  * part of a single ActivateBestChainStep call.
+ *
+ * This class also tracks transactions that are removed from the mempool as
+ * conflicts (per block) and can be used to pass all those transactions through
+ * SyncTransaction.
+ *
+ * This class assumes (and asserts) that the conflicted transactions for a given
+ * block are added via mempool callbacks prior to the BlockConnected()
+ * associated with those transactions. If any transactions are marked
+ * conflicted, it is assumed that an associated block will always be added.
+ *
+ * This class is single-use: once you call GetBlocksConnected() you have to
+ * throw it away and make a new one.
  */
-struct ConnectTrace {
-    std::vector<std::pair<CBlockIndex *, std::shared_ptr<const CBlock>>>
-        blocksConnected;
+class ConnectTrace {
+private:
+    std::vector<PerBlockConnectTrace> blocksConnected;
+    CTxMemPool &pool;
+
+public:
+    ConnectTrace(CTxMemPool &_pool) : blocksConnected(1), pool(_pool) {
+        pool.NotifyEntryRemoved.connect(
+            boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2));
+    }
+
+    ~ConnectTrace() {
+        pool.NotifyEntryRemoved.disconnect(
+            boost::bind(&ConnectTrace::NotifyEntryRemoved, this, _1, _2));
+    }
+
+    void BlockConnected(CBlockIndex *pindex,
+                        std::shared_ptr<const CBlock> pblock) {
+        assert(!blocksConnected.back().pindex);
+        assert(pindex);
+        assert(pblock);
+        blocksConnected.back().pindex = pindex;
+        blocksConnected.back().pblock = std::move(pblock);
+        blocksConnected.emplace_back();
+    }
+
+    std::vector<PerBlockConnectTrace> &GetBlocksConnected() {
+        // We always keep one extra block at the end of our list because blocks
+        // are added after all the conflicted transactions have been filled in.
+        // Thus, the last entry should always be an empty one waiting for the
+        // transactions from the next block. We pop the last entry here to make
+        // sure the list we return is sane.
+        assert(!blocksConnected.back().pindex);
+        assert(blocksConnected.back().conflictedTxs->empty());
+        blocksConnected.pop_back();
+        return blocksConnected;
+    }
+
+    void NotifyEntryRemoved(CTransactionRef txRemoved,
+                            MemPoolRemovalReason reason) {
+        assert(!blocksConnected.back().pindex);
+        if (reason == MemPoolRemovalReason::CONFLICT) {
+            blocksConnected.back().conflictedTxs->emplace_back(
+                std::move(txRemoved));
+        }
+    }
 };
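
 // Usage sketch (the exact consumer code lives in ActivateBestChain below and
 // may differ in detail): the trace is created under cs_main so it sees the
 // mempool's CONFLICT removals, filled in by ConnectTip(), then drained once.
 //
 //     ConnectTrace connectTrace(mempool); // hooks NotifyEntryRemoved
 //     // ... ActivateBestChainStep(config, state, pindexMostWork, pblock,
 //     //                           fInvalidFound, connectTrace) ...
 //     for (PerBlockConnectTrace &trace : connectTrace.GetBlocksConnected()) {
 //         assert(trace.pindex && trace.pblock);
 //         // notify listeners about trace.pblock and *trace.conflictedTxs
 //     }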
 
 /**
  * Connect a new block to chainActive. pblock is either nullptr or a pointer to
  * a CBlock corresponding to pindexNew, to bypass loading it again from disk.
  *
- * The block is always added to connectTrace (either after loading from disk or
- * by copying pblock) - if that is not intended, care must be taken to remove
- * the last entry in blocksConnected in case of failure.
+ * The block is added to connectTrace if connection succeeds.
  */
 static bool ConnectTip(const Config &config, CValidationState &state,
                        CBlockIndex *pindexNew,
                        const std::shared_ptr<const CBlock> &pblock,
                        ConnectTrace &connectTrace) {
     const CChainParams &chainparams = config.GetChainParams();
     assert(pindexNew->pprev == chainActive.Tip());
     // Read block from disk.
     int64_t nTime1 = GetTimeMicros();
+    std::shared_ptr<const CBlock> pthisBlock;
     if (!pblock) {
         std::shared_ptr<CBlock> pblockNew = std::make_shared<CBlock>();
-        connectTrace.blocksConnected.emplace_back(pindexNew, pblockNew);
         if (!ReadBlockFromDisk(*pblockNew, pindexNew, config)) {
             return AbortNode(state, "Failed to read block");
         }
+        pthisBlock = pblockNew;
     } else {
-        connectTrace.blocksConnected.emplace_back(pindexNew, pblock);
+        pthisBlock = pblock;
     }
 
-    const CBlock &blockConnecting = *connectTrace.blocksConnected.back().second;
+    const CBlock &blockConnecting = *pthisBlock;
+
     // Apply the block atomically to the chain state.
     int64_t nTime2 = GetTimeMicros();
     nTimeReadFromDisk += nTime2 - nTime1;
     int64_t nTime3;
     LogPrint(BCLog::BENCH, "  - Load block from disk: %.2fms [%.2fs]\n",
              (nTime2 - nTime1) * 0.001, nTimeReadFromDisk * 0.000001);
     {
         CCoinsViewCache view(pcoinsTip);
         bool rv = ConnectBlock(config, blockConnecting, state, pindexNew, view,
                                chainparams);
         GetMainSignals().BlockChecked(blockConnecting, state);
         if (!rv) {
             if (state.IsInvalid()) {
                 InvalidBlockFound(pindexNew, state);
             }
             return error("ConnectTip(): ConnectBlock %s failed",
                          pindexNew->GetBlockHash().ToString());
         }
         nTime3 = GetTimeMicros();
         nTimeConnectTotal += nTime3 - nTime2;
         LogPrint(BCLog::BENCH, "  - Connect total: %.2fms [%.2fs]\n",
                  (nTime3 - nTime2) * 0.001, nTimeConnectTotal * 0.000001);
         bool flushed = view.Flush();
         assert(flushed);
     }
     int64_t nTime4 = GetTimeMicros();
     nTimeFlush += nTime4 - nTime3;
     LogPrint(BCLog::BENCH, "  - Flush: %.2fms [%.2fs]\n",
              (nTime4 - nTime3) * 0.001, nTimeFlush * 0.000001);
     // Write the chain state to disk, if necessary.
     if (!FlushStateToDisk(state, FLUSH_STATE_IF_NEEDED)) return false;
     int64_t nTime5 = GetTimeMicros();
     nTimeChainState += nTime5 - nTime4;
     LogPrint(BCLog::BENCH, "  - Writing chainstate: %.2fms [%.2fs]\n",
              (nTime5 - nTime4) * 0.001, nTimeChainState * 0.000001);
     // Remove conflicting transactions from the mempool.
     mempool.removeForBlock(blockConnecting.vtx, pindexNew->nHeight);
     // Update chainActive & related variables.
     UpdateTip(config, pindexNew);
 
     int64_t nTime6 = GetTimeMicros();
     nTimePostConnect += nTime6 - nTime5;
     nTimeTotal += nTime6 - nTime1;
     LogPrint(BCLog::BENCH, "  - Connect postprocess: %.2fms [%.2fs]\n",
              (nTime6 - nTime5) * 0.001, nTimePostConnect * 0.000001);
     LogPrint(BCLog::BENCH, "- Connect block: %.2fms [%.2fs]\n",
              (nTime6 - nTime1) * 0.001, nTimeTotal * 0.000001);
+
+    connectTrace.BlockConnected(pindexNew, std::move(pthisBlock));
     return true;
 }
 
 /**
  * Return the tip of the chain with the most work in it, that isn't known to be
  * invalid (it's however far from certain to be valid).
  */
 static CBlockIndex *FindMostWorkChain() {
     do {
         CBlockIndex *pindexNew = nullptr;
 
         // Find the best candidate header.
         {
             std::set<CBlockIndex *, CBlockIndexWorkComparator>::reverse_iterator
                 it = setBlockIndexCandidates.rbegin();
             if (it == setBlockIndexCandidates.rend()) return nullptr;
             pindexNew = *it;
         }
 
         // Check whether all blocks on the path between the currently active
         // chain and the candidate are valid. Just going until the active chain
         // is an optimization, as we know all blocks in it are valid already.
         CBlockIndex *pindexTest = pindexNew;
         bool fInvalidAncestor = false;
         while (pindexTest && !chainActive.Contains(pindexTest)) {
             assert(pindexTest->nChainTx || pindexTest->nHeight == 0);
 
             // Pruned nodes may have entries in setBlockIndexCandidates for
             // which block files have been deleted. Remove those as candidates
             // for the most work chain if we come across them; we can't switch
             // to a chain unless we have all the non-active-chain parent blocks.
             bool fFailedChain = pindexTest->nStatus & BLOCK_FAILED_MASK;
             bool fMissingData = !(pindexTest->nStatus & BLOCK_HAVE_DATA);
             if (fFailedChain || fMissingData) {
                 // Candidate chain is not usable (either invalid or missing
                 // data)
                 if (fFailedChain &&
                     (pindexBestInvalid == nullptr ||
                      pindexNew->nChainWork > pindexBestInvalid->nChainWork))
                     pindexBestInvalid = pindexNew;
                 CBlockIndex *pindexFailed = pindexNew;
                 // Remove the entire chain from the set.
                 while (pindexTest != pindexFailed) {
                     if (fFailedChain) {
                         pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                     } else if (fMissingData) {
                         // If we're missing data, then add back to
                         // mapBlocksUnlinked, so that if the block arrives in
                         // the future we can try adding to
                         // setBlockIndexCandidates again.
                         mapBlocksUnlinked.insert(
                             std::make_pair(pindexFailed->pprev, pindexFailed));
                     }
                     setBlockIndexCandidates.erase(pindexFailed);
                     pindexFailed = pindexFailed->pprev;
                 }
                 setBlockIndexCandidates.erase(pindexTest);
                 fInvalidAncestor = true;
                 break;
             }
             pindexTest = pindexTest->pprev;
         }
         if (!fInvalidAncestor) return pindexNew;
     } while (true);
 }
 
 /** Delete all entries in setBlockIndexCandidates that are worse than the
  * current tip. */
 static void PruneBlockIndexCandidates() {
     // Note that we can't delete the current block itself, as we may need to
     // return to it later in case a reorganization to a better block fails.
     std::set<CBlockIndex *, CBlockIndexWorkComparator>::iterator it =
         setBlockIndexCandidates.begin();
     while (it != setBlockIndexCandidates.end() &&
            setBlockIndexCandidates.value_comp()(*it, chainActive.Tip())) {
         setBlockIndexCandidates.erase(it++);
     }
     // Either the current tip or a successor of it we're working towards is left
     // in setBlockIndexCandidates.
     assert(!setBlockIndexCandidates.empty());
 }
 
 /**
  * Try to make some progress towards making pindexMostWork the active block.
  * pblock is either nullptr or a pointer to a CBlock corresponding to
  * pindexMostWork.
  */
 static bool ActivateBestChainStep(const Config &config, CValidationState &state,
                                   CBlockIndex *pindexMostWork,
                                   const std::shared_ptr<const CBlock> &pblock,
                                   bool &fInvalidFound,
                                   ConnectTrace &connectTrace) {
     AssertLockHeld(cs_main);
     const CBlockIndex *pindexOldTip = chainActive.Tip();
     const CBlockIndex *pindexFork = chainActive.FindFork(pindexMostWork);
 
     // Disconnect active blocks which are no longer in the best chain.
     bool fBlocksDisconnected = false;
     while (chainActive.Tip() && chainActive.Tip() != pindexFork) {
         if (!DisconnectTip(config, state)) return false;
         fBlocksDisconnected = true;
     }
 
     // Build list of new blocks to connect.
     std::vector<CBlockIndex *> vpindexToConnect;
     bool fContinue = true;
     int nHeight = pindexFork ? pindexFork->nHeight : -1;
     while (fContinue && nHeight != pindexMostWork->nHeight) {
         // Don't iterate the entire list of potential improvements toward the
         // best tip, as we likely only need a few blocks along the way.
         int nTargetHeight = std::min(nHeight + 32, pindexMostWork->nHeight);
         vpindexToConnect.clear();
         vpindexToConnect.reserve(nTargetHeight - nHeight);
         CBlockIndex *pindexIter = pindexMostWork->GetAncestor(nTargetHeight);
         while (pindexIter && pindexIter->nHeight != nHeight) {
             vpindexToConnect.push_back(pindexIter);
             pindexIter = pindexIter->pprev;
         }
         nHeight = nTargetHeight;
 
         // Connect new blocks.
         for (CBlockIndex *pindexConnect :
              boost::adaptors::reverse(vpindexToConnect)) {
             if (!ConnectTip(config, state, pindexConnect,
                             pindexConnect == pindexMostWork
                                 ? pblock
                                 : std::shared_ptr<const CBlock>(),
                             connectTrace)) {
                 if (state.IsInvalid()) {
                     // The block violates a consensus rule.
                     if (!state.CorruptionPossible())
                         InvalidChainFound(vpindexToConnect.back());
                     state = CValidationState();
                     fInvalidFound = true;
                     fContinue = false;
-                    // If we didn't actually connect the block, don't notify
-                    // listeners about it
-                    connectTrace.blocksConnected.pop_back();
                     break;
                 } else {
                     // A system error occurred (disk space, database error,
                     // ...).
                     return false;
                 }
             } else {
                 PruneBlockIndexCandidates();
                 if (!pindexOldTip ||
                     chainActive.Tip()->nChainWork > pindexOldTip->nChainWork) {
                     // We're in a better position than we were. Return
                     // temporarily to release the lock.
                     fContinue = false;
                     break;
                 }
             }
         }
     }
 
     if (fBlocksDisconnected) {
         mempool.removeForReorg(config, pcoinsTip,
                                chainActive.Tip()->nHeight + 1,
                                STANDARD_LOCKTIME_VERIFY_FLAGS);
         LimitMempoolSize(
             mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
             GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
     }
     mempool.check(pcoinsTip);
 
     // Callbacks/notifications for a new best chain.
     if (fInvalidFound)
         CheckForkWarningConditionsOnNewFork(vpindexToConnect.back());
     else
         CheckForkWarningConditions();
 
     return true;
 }
 
 static void NotifyHeaderTip() {
     bool fNotify = false;
     bool fInitialBlockDownload = false;
     static CBlockIndex *pindexHeaderOld = nullptr;
     CBlockIndex *pindexHeader = nullptr;
     {
         LOCK(cs_main);
         pindexHeader = pindexBestHeader;
 
         if (pindexHeader != pindexHeaderOld) {
             fNotify = true;
             fInitialBlockDownload = IsInitialBlockDownload();
             pindexHeaderOld = pindexHeader;
         }
     }
     // Send block tip changed notifications without cs_main
     if (fNotify) {
         uiInterface.NotifyHeaderTip(fInitialBlockDownload, pindexHeader);
     }
 }
 
 /**
  * Make the best chain active, in multiple steps. The result is either failure
  * or an activated best chain. pblock is either nullptr or a pointer to a block
  * that is already loaded (to avoid loading it again from disk).
  */
 bool ActivateBestChain(const Config &config, CValidationState &state,
                        std::shared_ptr<const CBlock> pblock) {
     // Note that while we're often called here from ProcessNewBlock, this is
     // far from a guarantee. Things in the P2P/RPC will often end up calling
     // us in the middle of ProcessNewBlock - do not assume pblock is set
     // sanely for performance or correctness!
 
     CBlockIndex *pindexMostWork = nullptr;
     CBlockIndex *pindexNewTip = nullptr;
     do {
         boost::this_thread::interruption_point();
         if (ShutdownRequested()) break;
 
         const CBlockIndex *pindexFork;
-        ConnectTrace connectTrace;
         bool fInitialDownload;
         {
             LOCK(cs_main);
-            {
-                // TODO: Tempoarily ensure that mempool removals are notified
-                // before connected transactions. This shouldn't matter, but the
-                // abandoned state of transactions in our wallet is currently
-                // cleared when we receive another notification and there is a
-                // race condition where notification of a connected conflict
-                // might cause an outside process to abandon a transaction and
-                // then have it inadvertantly cleared by the notification that
-                // the conflicted transaction was evicted.
-                MemPoolConflictRemovalTracker mrt(mempool);
-                CBlockIndex *pindexOldTip = chainActive.Tip();
-                if (pindexMostWork == nullptr) {
-                    pindexMostWork = FindMostWorkChain();
-                }
 
-                // Whether we have anything to do at all.
-                if (pindexMostWork == nullptr ||
-                    pindexMostWork == chainActive.Tip()) {
-                    return true;
-                }
+            // Destructed before cs_main is unlocked.
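+            // The trace also records, for every block connected below, the
+            // mempool transactions that were removed as conflicts, so they
+            // can be reported together in BlockConnected().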
+            ConnectTrace connectTrace(mempool);
 
-                bool fInvalidFound = false;
-                std::shared_ptr<const CBlock> nullBlockPtr;
-                if (!ActivateBestChainStep(
-                        config, state, pindexMostWork,
-                        pblock &&
-                                pblock->GetHash() ==
-                                    pindexMostWork->GetBlockHash()
-                            ? pblock
-                            : nullBlockPtr,
-                        fInvalidFound, connectTrace)) {
-                    return false;
-                }
+            CBlockIndex *pindexOldTip = chainActive.Tip();
+            if (pindexMostWork == nullptr) {
+                pindexMostWork = FindMostWorkChain();
+            }
 
-                if (fInvalidFound) {
-                    // Wipe cache, we may need another branch now.
-                    pindexMostWork = nullptr;
-                }
-                pindexNewTip = chainActive.Tip();
-                pindexFork = chainActive.FindFork(pindexOldTip);
-                fInitialDownload = IsInitialBlockDownload();
-
-                // throw all transactions though the signal-interface
-
-            } // MemPoolConflictRemovalTracker destroyed and conflict evictions
-              // are notified
-
-            // Transactions in the connnected block are notified
-            for (const auto &pair : connectTrace.blocksConnected) {
-                assert(pair.second);
-                const CBlock &block = *(pair.second);
-                for (size_t i = 0; i < block.vtx.size(); i++) {
-                    GetMainSignals().SyncTransaction(*block.vtx[i], pair.first,
-                                                     i);
-                }
+            // Whether we have anything to do at all.
+            if (pindexMostWork == nullptr ||
+                pindexMostWork == chainActive.Tip()) {
+                return true;
+            }
+
+            bool fInvalidFound = false;
+            std::shared_ptr<const CBlock> nullBlockPtr;
+            if (!ActivateBestChainStep(
+                    config, state, pindexMostWork,
+                    pblock &&
+                            pblock->GetHash() == pindexMostWork->GetBlockHash()
+                        ? pblock
+                        : nullBlockPtr,
+                    fInvalidFound, connectTrace)) {
+                return false;
+            }
+
+            if (fInvalidFound) {
+                // Wipe cache, we may need another branch now.
+                pindexMostWork = nullptr;
+            }
+            pindexNewTip = chainActive.Tip();
+            pindexFork = chainActive.FindFork(pindexOldTip);
+            fInitialDownload = IsInitialBlockDownload();
+
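+            // Notify listeners about each connected block, along with the
+            // transactions it evicted from the mempool as conflicts, while
+            // cs_main is still held.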
+            for (const PerBlockConnectTrace &trace :
+                 connectTrace.GetBlocksConnected()) {
+                assert(trace.pblock && trace.pindex);
+                GetMainSignals().BlockConnected(trace.pblock, trace.pindex,
+                                                *trace.conflictedTxs);
             }
         }
         // When we reach this point, we switched to a new tip (stored in
         // pindexNewTip).
 
         // Notifications/callbacks that can run without cs_main
 
         // Notify external listeners about the new tip.
         GetMainSignals().UpdatedBlockTip(pindexNewTip, pindexFork,
                                          fInitialDownload);
 
         // Always notify the UI if a new block tip was connected
         if (pindexFork != pindexNewTip) {
             uiInterface.NotifyBlockTip(fInitialDownload, pindexNewTip);
         }
     } while (pindexNewTip != pindexMostWork);
     CheckBlockIndex(config.GetChainParams().GetConsensus());
 
     // Write changes periodically to disk, after relay.
     if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
         return false;
     }
 
     int nStopAtHeight = GetArg("-stopatheight", DEFAULT_STOPATHEIGHT);
     if (nStopAtHeight && pindexNewTip &&
         pindexNewTip->nHeight >= nStopAtHeight) {
         StartShutdown();
     }
 
     return true;
 }
 
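 /**
  * Treat a block as if it had been received before any competing block with
  * the same amount of work, by assigning it a decreasing (negative) sequence
  * id, then try to make it the active chain tip.
  */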
 bool PreciousBlock(const Config &config, CValidationState &state,
                    CBlockIndex *pindex) {
     {
         LOCK(cs_main);
         if (pindex->nChainWork < chainActive.Tip()->nChainWork) {
             // Nothing to do, this block is not at the tip.
             return true;
         }
         if (chainActive.Tip()->nChainWork > nLastPreciousChainwork) {
             // The chain has been extended since the last call, reset the
             // counter.
             nBlockReverseSequenceId = -1;
         }
         nLastPreciousChainwork = chainActive.Tip()->nChainWork;
         setBlockIndexCandidates.erase(pindex);
         pindex->nSequenceId = nBlockReverseSequenceId;
         if (nBlockReverseSequenceId > std::numeric_limits<int32_t>::min()) {
             // We can't keep reducing the counter if somebody really wants to
             // call preciousblock 2**31-1 times on the same set of tips...
             nBlockReverseSequenceId--;
         }
         if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) && pindex->nChainTx) {
             setBlockIndexCandidates.insert(pindex);
             PruneBlockIndexCandidates();
         }
     }
 
     return ActivateBestChain(config, state);
 }
 
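 /**
  * Mark a block as invalid, disconnect the active chain back past it (marking
  * the disconnected descendants as failed children), and rebuild the set of
  * chain tip candidates so a new best chain can be selected.
  */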
 bool InvalidateBlock(const Config &config, CValidationState &state,
                      CBlockIndex *pindex) {
     AssertLockHeld(cs_main);
 
     // Mark the block itself as invalid.
     pindex->nStatus |= BLOCK_FAILED_VALID;
     setDirtyBlockIndex.insert(pindex);
     setBlockIndexCandidates.erase(pindex);
 
     while (chainActive.Contains(pindex)) {
         CBlockIndex *pindexWalk = chainActive.Tip();
         pindexWalk->nStatus |= BLOCK_FAILED_CHILD;
         setDirtyBlockIndex.insert(pindexWalk);
         setBlockIndexCandidates.erase(pindexWalk);
         // ActivateBestChain considers blocks already in chainActive
         // unconditionally valid, so force disconnect away from it.
         if (!DisconnectTip(config, state)) {
             mempool.removeForReorg(config, pcoinsTip,
                                    chainActive.Tip()->nHeight + 1,
                                    STANDARD_LOCKTIME_VERIFY_FLAGS);
             return false;
         }
     }
 
     LimitMempoolSize(
         mempool, GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
         GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
 
     // The resulting new best tip may not be in setBlockIndexCandidates anymore,
     // so add it again.
     BlockMap::iterator it = mapBlockIndex.begin();
     while (it != mapBlockIndex.end()) {
         if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) &&
             it->second->nChainTx &&
             !setBlockIndexCandidates.value_comp()(it->second,
                                                   chainActive.Tip())) {
             setBlockIndexCandidates.insert(it->second);
         }
         it++;
     }
 
     InvalidChainFound(pindex);
     mempool.removeForReorg(config, pcoinsTip, chainActive.Tip()->nHeight + 1,
                            STANDARD_LOCKTIME_VERIFY_FLAGS);
     uiInterface.NotifyBlockTip(IsInitialBlockDownload(), pindex->pprev);
     return true;
 }
 
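 /**
  * Remove the invalidity flags from a block, all of its descendants and all of
  * its ancestors, re-adding eligible blocks to setBlockIndexCandidates so a
  * previously invalidated chain can be reconsidered.
  */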
 bool ResetBlockFailureFlags(CBlockIndex *pindex) {
     AssertLockHeld(cs_main);
 
     int nHeight = pindex->nHeight;
 
     // Remove the invalidity flag from this block and all its descendants.
     BlockMap::iterator it = mapBlockIndex.begin();
     while (it != mapBlockIndex.end()) {
         if (!it->second->IsValid() &&
             it->second->GetAncestor(nHeight) == pindex) {
             it->second->nStatus &= ~BLOCK_FAILED_MASK;
             setDirtyBlockIndex.insert(it->second);
             if (it->second->IsValid(BLOCK_VALID_TRANSACTIONS) &&
                 it->second->nChainTx &&
                 setBlockIndexCandidates.value_comp()(chainActive.Tip(),
                                                      it->second)) {
                 setBlockIndexCandidates.insert(it->second);
             }
             if (it->second == pindexBestInvalid) {
                 // Reset invalid block marker if it was pointing to one of
                 // those.
                 pindexBestInvalid = nullptr;
             }
         }
         it++;
     }
 
     // Remove the invalidity flag from all ancestors too.
     while (pindex != nullptr) {
         if (pindex->nStatus & BLOCK_FAILED_MASK) {
             pindex->nStatus &= ~BLOCK_FAILED_MASK;
             setDirtyBlockIndex.insert(pindex);
         }
         pindex = pindex->pprev;
     }
     return true;
 }
 
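 /**
  * Create a CBlockIndex entry for a new header (or return the existing one),
  * link it to its parent, compute its cumulative chain work and update
  * pindexBestHeader if it now has the most work.
  */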
 CBlockIndex *AddToBlockIndex(const CBlockHeader &block) {
     // Check for duplicate
     uint256 hash = block.GetHash();
     BlockMap::iterator it = mapBlockIndex.find(hash);
     if (it != mapBlockIndex.end()) return it->second;
 
     // Construct new block index object
     CBlockIndex *pindexNew = new CBlockIndex(block);
     assert(pindexNew);
     // We assign the sequence id to blocks only when the full data is available,
     // to avoid miners withholding blocks but broadcasting headers, to get a
     // competitive advantage.
     pindexNew->nSequenceId = 0;
     BlockMap::iterator mi =
         mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
     pindexNew->phashBlock = &((*mi).first);
     BlockMap::iterator miPrev = mapBlockIndex.find(block.hashPrevBlock);
     if (miPrev != mapBlockIndex.end()) {
         pindexNew->pprev = (*miPrev).second;
         pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
         pindexNew->BuildSkip();
     }
     pindexNew->nTimeMax =
         (pindexNew->pprev
              ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime)
              : pindexNew->nTime);
     pindexNew->nChainWork =
         (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) +
         GetBlockProof(*pindexNew);
     pindexNew->RaiseValidity(BLOCK_VALID_TREE);
     if (pindexBestHeader == nullptr ||
         pindexBestHeader->nChainWork < pindexNew->nChainWork) {
         pindexBestHeader = pindexNew;
     }
 
     setDirtyBlockIndex.insert(pindexNew);
 
     return pindexNew;
 }
 
 /**
  * Mark a block as having its data received and checked (up to
  * BLOCK_VALID_TRANSACTIONS).
  */
 bool ReceivedBlockTransactions(const CBlock &block, CValidationState &state,
                                CBlockIndex *pindexNew,
                                const CDiskBlockPos &pos) {
     pindexNew->nTx = block.vtx.size();
     pindexNew->nChainTx = 0;
     pindexNew->nFile = pos.nFile;
     pindexNew->nDataPos = pos.nPos;
     pindexNew->nUndoPos = 0;
     pindexNew->nStatus |= BLOCK_HAVE_DATA;
     pindexNew->RaiseValidity(BLOCK_VALID_TRANSACTIONS);
     setDirtyBlockIndex.insert(pindexNew);
 
     if (pindexNew->pprev == nullptr || pindexNew->pprev->nChainTx) {
         // If pindexNew is the genesis block or all parents are
         // BLOCK_VALID_TRANSACTIONS.
         std::deque<CBlockIndex *> queue;
         queue.push_back(pindexNew);
 
         // Recursively process any descendant blocks that now may be eligible to
         // be connected.
         while (!queue.empty()) {
             CBlockIndex *pindex = queue.front();
             queue.pop_front();
             pindex->nChainTx =
                 (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
             {
                 LOCK(cs_nBlockSequenceId);
                 pindex->nSequenceId = nBlockSequenceId++;
             }
             if (chainActive.Tip() == nullptr ||
                 !setBlockIndexCandidates.value_comp()(pindex,
                                                       chainActive.Tip())) {
                 setBlockIndexCandidates.insert(pindex);
             }
             std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                       std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                 range = mapBlocksUnlinked.equal_range(pindex);
             while (range.first != range.second) {
                 std::multimap<CBlockIndex *, CBlockIndex *>::iterator it =
                     range.first;
                 queue.push_back(it->second);
                 range.first++;
                 mapBlocksUnlinked.erase(it);
             }
         }
     } else {
         if (pindexNew->pprev && pindexNew->pprev->IsValid(BLOCK_VALID_TREE)) {
             mapBlocksUnlinked.insert(
                 std::make_pair(pindexNew->pprev, pindexNew));
         }
     }
 
     return true;
 }
 
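 /**
  * Find a position in the block files to store nAddSize bytes, rolling over to
  * a new blk?????.dat file when the current one would exceed
  * MAX_BLOCKFILE_SIZE and pre-allocating space in BLOCKFILE_CHUNK_SIZE
  * increments. If fKnown is set (e.g. during reindex), pos already refers to
  * an existing location and only the file bookkeeping is updated.
  */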
 bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos,
                   unsigned int nAddSize, unsigned int nHeight, uint64_t nTime,
                   bool fKnown = false) {
     LOCK(cs_LastBlockFile);
 
     unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
     if (vinfoBlockFile.size() <= nFile) {
         vinfoBlockFile.resize(nFile + 1);
     }
 
     if (!fKnown) {
         while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
             nFile++;
             if (vinfoBlockFile.size() <= nFile) {
                 vinfoBlockFile.resize(nFile + 1);
             }
         }
         pos.nFile = nFile;
         pos.nPos = vinfoBlockFile[nFile].nSize;
     }
 
     if ((int)nFile != nLastBlockFile) {
         if (!fKnown) {
             LogPrintf("Leaving block file %i: %s\n", nLastBlockFile,
                       vinfoBlockFile[nLastBlockFile].ToString());
         }
         FlushBlockFile(!fKnown);
         nLastBlockFile = nFile;
     }
 
     vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
     if (fKnown)
         vinfoBlockFile[nFile].nSize =
             std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
     else
         vinfoBlockFile[nFile].nSize += nAddSize;
 
     if (!fKnown) {
         unsigned int nOldChunks =
             (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
         unsigned int nNewChunks =
             (vinfoBlockFile[nFile].nSize + BLOCKFILE_CHUNK_SIZE - 1) /
             BLOCKFILE_CHUNK_SIZE;
         if (nNewChunks > nOldChunks) {
             if (fPruneMode) fCheckForPruning = true;
             if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
                 FILE *file = OpenBlockFile(pos);
                 if (file) {
                     LogPrintf(
                         "Pre-allocating up to position 0x%x in blk%05u.dat\n",
                         nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
                     AllocateFileRange(file, pos.nPos,
                                       nNewChunks * BLOCKFILE_CHUNK_SIZE -
                                           pos.nPos);
                     fclose(file);
                 }
             } else
                 return state.Error("out of disk space");
         }
     }
 
     setDirtyFileInfo.insert(nFile);
     return true;
 }
 
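 /**
  * Reserve nAddSize bytes at the end of the undo (rev?????.dat) file that
  * corresponds to block file nFile, pre-allocating disk space in
  * UNDOFILE_CHUNK_SIZE increments.
  */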
 bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos,
                  unsigned int nAddSize) {
     pos.nFile = nFile;
 
     LOCK(cs_LastBlockFile);
 
     unsigned int nNewSize;
     pos.nPos = vinfoBlockFile[nFile].nUndoSize;
     nNewSize = vinfoBlockFile[nFile].nUndoSize += nAddSize;
     setDirtyFileInfo.insert(nFile);
 
     unsigned int nOldChunks =
         (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
     unsigned int nNewChunks =
         (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
     if (nNewChunks > nOldChunks) {
         if (fPruneMode) fCheckForPruning = true;
         if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
             FILE *file = OpenUndoFile(pos);
             if (file) {
                 LogPrintf("Pre-allocating up to position 0x%x in rev%05u.dat\n",
                           nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
                 AllocateFileRange(file, pos.nPos,
                                   nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
                 fclose(file);
             }
         } else
             return state.Error("out of disk space");
     }
 
     return true;
 }
 
 static bool CheckBlockHeader(const Config &config, const CBlockHeader &block,
                              CValidationState &state, bool fCheckPOW = true) {
     // Check proof of work matches claimed amount
     if (fCheckPOW && !CheckProofOfWork(block.GetHash(), block.nBits, config)) {
         return state.DoS(50, false, REJECT_INVALID, "high-hash", false,
                          "proof of work failed");
     }
 
     return true;
 }
 
 bool CheckBlock(const Config &config, const CBlock &block,
                 CValidationState &state, bool fCheckPOW,
                 bool fCheckMerkleRoot) {
     // These are checks that are independent of context.
     if (block.fChecked) {
         return true;
     }
 
     // Check that the header is valid (particularly PoW).  This is mostly
     // redundant with the call in AcceptBlockHeader.
     if (!CheckBlockHeader(config, block, state, fCheckPOW)) {
         return false;
     }
 
     // Check the merkle root.
     if (fCheckMerkleRoot) {
         bool mutated;
         uint256 hashMerkleRoot2 = BlockMerkleRoot(block, &mutated);
         if (block.hashMerkleRoot != hashMerkleRoot2) {
             return state.DoS(100, false, REJECT_INVALID, "bad-txnmrklroot",
                              true, "hashMerkleRoot mismatch");
         }
 
         // Check for merkle tree malleability (CVE-2012-2459): repeating
         // sequences of transactions in a block without affecting the merkle
         // root of a block, while still invalidating it.
         if (mutated) {
             return state.DoS(100, false, REJECT_INVALID, "bad-txns-duplicate",
                              true, "duplicate transaction");
         }
     }
 
     // All potential-corruption validation must be done before we do any
     // transaction validation, as otherwise we may mark the header as invalid
     // because we receive the wrong transactions for it.
 
     // First transaction must be coinbase.
     if (block.vtx.empty()) {
         return state.DoS(100, false, REJECT_INVALID, "bad-cb-missing", false,
                          "first tx is not coinbase");
     }
 
     // Size limits.
     auto nMaxBlockSize = config.GetMaxBlockSize();
 
     // Bail early if there is no way this block is of reasonable size.
     if ((block.vtx.size() * MIN_TRANSACTION_SIZE) > nMaxBlockSize) {
         return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
                          "size limits failed");
     }
 
     auto currentBlockSize =
         ::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION);
     if (currentBlockSize > nMaxBlockSize) {
         return state.DoS(100, false, REJECT_INVALID, "bad-blk-length", false,
                          "size limits failed");
     }
 
     // And a valid coinbase.
     if (!CheckCoinbase(*block.vtx[0], state, false)) {
         return state.Invalid(false, state.GetRejectCode(),
                              state.GetRejectReason(),
                              strprintf("Coinbase check failed (txid %s) %s",
                                        block.vtx[0]->GetId().ToString(),
                                        state.GetDebugMessage()));
     }
 
     // Keep track of the sigops count.
     uint64_t nSigOps = 0;
     auto nMaxSigOpsCount = GetMaxBlockSigOpsCount(currentBlockSize);
 
     // Check transactions
     auto txCount = block.vtx.size();
     auto *tx = block.vtx[0].get();
 
     size_t i = 0;
     while (true) {
         // Count the sigops for the current transaction. If the total sigops
         // count is too high, the block is invalid.
         nSigOps += GetSigOpCountWithoutP2SH(*tx);
         if (nSigOps > nMaxSigOpsCount) {
             return state.DoS(100, false, REJECT_INVALID, "bad-blk-sigops",
                              false, "out-of-bounds SigOpCount");
         }
 
         // Go to the next transaction.
         i++;
 
         // We reached the end of the block, success.
         if (i >= txCount) {
             break;
         }
 
         // Check that the transaction is valid; because this check differs for
         // the coinbase, the loop is arranged so that it only runs after at
         // least one increment.
         tx = block.vtx[i].get();
         if (!CheckRegularTransaction(*tx, state, false)) {
             return state.Invalid(
                 false, state.GetRejectCode(), state.GetRejectReason(),
                 strprintf("Transaction check failed (txid %s) %s",
                           tx->GetId().ToString(), state.GetDebugMessage()));
         }
     }
 
     if (fCheckPOW && fCheckMerkleRoot) {
         block.fChecked = true;
     }
 
     return true;
 }
 
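 /**
  * Reject blocks that would fork off the main chain below the last checkpoint
  * known to our block index; blocks building directly on the genesis block are
  * always accepted.
  */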
 static bool CheckIndexAgainstCheckpoint(const CBlockIndex *pindexPrev,
                                         CValidationState &state,
                                         const CChainParams &chainparams,
                                         const uint256 &hash) {
     if (*pindexPrev->phashBlock ==
         chainparams.GetConsensus().hashGenesisBlock) {
         return true;
     }
 
     int nHeight = pindexPrev->nHeight + 1;
     // Don't accept any forks from the main chain prior to last checkpoint.
     // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in
     // our MapBlockIndex.
     CBlockIndex *pcheckpoint =
         Checkpoints::GetLastCheckpoint(chainparams.Checkpoints());
     if (pcheckpoint && nHeight < pcheckpoint->nHeight) {
         return state.DoS(
             100,
             error("%s: forked chain older than last checkpoint (height %d)",
                   __func__, nHeight),
             REJECT_CHECKPOINT, "bad-fork-prior-to-checkpoint");
     }
 
     return true;
 }
 
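 /**
  * Header checks that depend on the block's position in the chain: the
  * required difficulty (nBits), the timestamp against the previous block's
  * median time past and against adjusted network time, and rejection of
  * obsolete block versions once BIP34, BIP65 and BIP66 have activated.
  */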
 static bool ContextualCheckBlockHeader(const Config &config,
                                        const CBlockHeader &block,
                                        CValidationState &state,
                                        const CBlockIndex *pindexPrev,
                                        int64_t nAdjustedTime) {
     const Consensus::Params &consensusParams =
         config.GetChainParams().GetConsensus();
 
     const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
 
     // Check proof of work
     if (block.nBits != GetNextWorkRequired(pindexPrev, &block, config)) {
         LogPrintf("bad bits after height: %d\n", pindexPrev->nHeight);
         return state.DoS(100, false, REJECT_INVALID, "bad-diffbits", false,
                          "incorrect proof of work");
     }
 
     // Check timestamp against prev
     if (block.GetBlockTime() <= pindexPrev->GetMedianTimePast()) {
         return state.Invalid(false, REJECT_INVALID, "time-too-old",
                              "block's timestamp is too early");
     }
 
     // Check timestamp
     if (block.GetBlockTime() > nAdjustedTime + MAX_FUTURE_BLOCK_TIME) {
         return state.Invalid(false, REJECT_INVALID, "time-too-new",
                              "block timestamp too far in the future");
     }
 
     // Reject outdated version blocks when 95% (75% on testnet) of the network
     // has upgraded:
     // check for version 2, 3 and 4 upgrades
     if ((block.nVersion < 2 && nHeight >= consensusParams.BIP34Height) ||
         (block.nVersion < 3 && nHeight >= consensusParams.BIP66Height) ||
         (block.nVersion < 4 && nHeight >= consensusParams.BIP65Height)) {
         return state.Invalid(
             false, REJECT_OBSOLETE,
             strprintf("bad-version(0x%08x)", block.nVersion),
             strprintf("rejected nVersion=0x%08x block", block.nVersion));
     }
 
     return true;
 }
 
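 /**
  * Contextual transaction checks: the transaction must be final at the given
  * height and locktime cutoff, and after UAHF activation (until the
  * anti-replay sunset height) it must not create outputs committing to the
  * anti-replay OP_RETURN.
  */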
 bool ContextualCheckTransaction(const Config &config, const CTransaction &tx,
                                 CValidationState &state, int nHeight,
                                 int64_t nLockTimeCutoff) {
     if (!IsFinalTx(tx, nHeight, nLockTimeCutoff)) {
         // While this is only one transaction, we use txns in the error to
         // ensure continuity with other clients.
         return state.DoS(10, false, REJECT_INVALID, "bad-txns-nonfinal", false,
                          "non-final transaction");
     }
 
     const Consensus::Params &consensusParams =
         config.GetChainParams().GetConsensus();
 
     if (IsUAHFenabled(config, nHeight) &&
         nHeight <= consensusParams.antiReplayOpReturnSunsetHeight) {
         for (const CTxOut &o : tx.vout) {
             if (o.scriptPubKey.IsCommitment(
                     consensusParams.antiReplayOpReturnCommitment)) {
                 return state.DoS(10, false, REJECT_INVALID, "bad-txn-replay",
                                  false, "non playable transaction");
             }
         }
     }
 
     return true;
 }
 
 bool ContextualCheckTransactionForCurrentBlock(const Config &config,
                                                const CTransaction &tx,
                                                CValidationState &state,
                                                int flags) {
     AssertLockHeld(cs_main);
 
     // By convention a negative value for flags indicates that the current
     // network-enforced consensus rules should be used. In a future soft-fork
     // scenario that would mean checking which rules would be enforced for the
     // next block and setting the appropriate flags. At the present time no
     // soft-forks are scheduled, so no flags are set.
     flags = std::max(flags, 0);
 
     // ContextualCheckTransactionForCurrentBlock() uses chainActive.Height()+1
     // to evaluate nLockTime because when IsFinalTx() is called within
     // CBlock::AcceptBlock(), the height of the block *being* evaluated is what
     // is used. Thus if we want to know if a transaction can be part of the
     // *next* block, we need to call ContextualCheckTransaction() with one more
     // than chainActive.Height().
     const int nBlockHeight = chainActive.Height() + 1;
 
     // BIP113 will require that time-locked transactions have nLockTime set to
     // less than the median time of the previous block they're contained in.
     // When the next block is created its previous block will be the current
     // chain tip, so we use that to calculate the median time passed to
     // ContextualCheckTransaction() if LOCKTIME_MEDIAN_TIME_PAST is set.
     const int64_t nLockTimeCutoff = (flags & LOCKTIME_MEDIAN_TIME_PAST)
                                         ? chainActive.Tip()->GetMedianTimePast()
                                         : GetAdjustedTime();
 
     return ContextualCheckTransaction(config, tx, state, nBlockHeight,
                                       nLockTimeCutoff);
 }
 
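 /**
  * Block checks that depend on the block's predecessor: every transaction must
  * be final at the block's locktime cutoff (median time past once BIP113/CSV
  * is active, otherwise the block time), and after BIP34 the coinbase
  * scriptSig must begin with the serialized block height.
  */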
 bool ContextualCheckBlock(const Config &config, const CBlock &block,
                           CValidationState &state,
                           const Consensus::Params &consensusParams,
                           const CBlockIndex *pindexPrev) {
     const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
 
     // Start enforcing BIP113 (Median Time Past) using versionbits logic.
     int nLockTimeFlags = 0;
     if (VersionBitsState(pindexPrev, consensusParams, Consensus::DEPLOYMENT_CSV,
                          versionbitscache) == THRESHOLD_ACTIVE) {
         nLockTimeFlags |= LOCKTIME_MEDIAN_TIME_PAST;
     }
 
     const int64_t nMedianTimePast =
         pindexPrev == nullptr ? 0 : pindexPrev->GetMedianTimePast();
 
     const int64_t nLockTimeCutoff = (nLockTimeFlags & LOCKTIME_MEDIAN_TIME_PAST)
                                         ? nMedianTimePast
                                         : block.GetBlockTime();
 
     // Check that all transactions are finalized
     for (const auto &tx : block.vtx) {
         if (!ContextualCheckTransaction(config, *tx, state, nHeight,
                                         nLockTimeCutoff)) {
             // state set by ContextualCheckTransaction.
             return false;
         }
     }
 
     // Enforce rule that the coinbase starts with serialized block height
     if (nHeight >= consensusParams.BIP34Height) {
         CScript expect = CScript() << nHeight;
         if (block.vtx[0]->vin[0].scriptSig.size() < expect.size() ||
             !std::equal(expect.begin(), expect.end(),
                         block.vtx[0]->vin[0].scriptSig.begin())) {
             return state.DoS(100, false, REJECT_INVALID, "bad-cb-height", false,
                              "block height mismatch in coinbase");
         }
     }
 
     return true;
 }
 
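 /**
  * Add a header to the block index if it passes the context-free and
  * contextual header checks. On success, *ppindex points to the (possibly
  * pre-existing) CBlockIndex entry for the header. Requires cs_main.
  */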
 static bool AcceptBlockHeader(const Config &config, const CBlockHeader &block,
                               CValidationState &state, CBlockIndex **ppindex) {
     AssertLockHeld(cs_main);
     const CChainParams &chainparams = config.GetChainParams();
 
     // Check for duplicate
     uint256 hash = block.GetHash();
     BlockMap::iterator miSelf = mapBlockIndex.find(hash);
     CBlockIndex *pindex = nullptr;
     if (hash != chainparams.GetConsensus().hashGenesisBlock) {
         if (miSelf != mapBlockIndex.end()) {
             // Block header is already known.
             pindex = miSelf->second;
             if (ppindex) {
                 *ppindex = pindex;
             }
             if (pindex->nStatus & BLOCK_FAILED_MASK) {
                 return state.Invalid(error("%s: block %s is marked invalid",
                                            __func__, hash.ToString()),
                                      0, "duplicate");
             }
             return true;
         }
 
         if (!CheckBlockHeader(config, block, state)) {
             return error("%s: Consensus::CheckBlockHeader: %s, %s", __func__,
                          hash.ToString(), FormatStateMessage(state));
         }
 
         // Get prev block index
         CBlockIndex *pindexPrev = nullptr;
         BlockMap::iterator mi = mapBlockIndex.find(block.hashPrevBlock);
         if (mi == mapBlockIndex.end()) {
             return state.DoS(10, error("%s: prev block not found", __func__), 0,
                              "prev-blk-not-found");
         }
 
         pindexPrev = (*mi).second;
         if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
             return state.DoS(100, error("%s: prev block invalid", __func__),
                              REJECT_INVALID, "bad-prevblk");
         }
 
         assert(pindexPrev);
         if (fCheckpointsEnabled &&
             !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams,
                                          hash)) {
             return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__,
                          state.GetRejectReason().c_str());
         }
 
         if (!ContextualCheckBlockHeader(config, block, state, pindexPrev,
                                         GetAdjustedTime())) {
             return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s",
                          __func__, hash.ToString(), FormatStateMessage(state));
         }
     }
 
     if (pindex == nullptr) {
         pindex = AddToBlockIndex(block);
     }
 
     if (ppindex) {
         *ppindex = pindex;
     }
 
     CheckBlockIndex(chainparams.GetConsensus());
 
     return true;
 }
 
 // Exposed wrapper for AcceptBlockHeader
 bool ProcessNewBlockHeaders(const Config &config,
                             const std::vector<CBlockHeader> &headers,
                             CValidationState &state,
                             const CBlockIndex **ppindex) {
     {
         LOCK(cs_main);
         for (const CBlockHeader &header : headers) {
             // Use a temp pindex instead of ppindex to avoid a const_cast
             CBlockIndex *pindex = nullptr;
             if (!AcceptBlockHeader(config, header, state, &pindex)) {
                 return false;
             }
             if (ppindex) {
                 *ppindex = pindex;
             }
         }
     }
     NotifyHeaderTip();
     return true;
 }
 
 /**
  * Store block on disk. If dbp is non-null, the file is known to already reside
  * on disk.
  */
 static bool AcceptBlock(const Config &config,
                         const std::shared_ptr<const CBlock> &pblock,
                         CValidationState &state, CBlockIndex **ppindex,
                         bool fRequested, const CDiskBlockPos *dbp,
                         bool *fNewBlock) {
     AssertLockHeld(cs_main);
 
     const CBlock &block = *pblock;
     if (fNewBlock) {
         *fNewBlock = false;
     }
 
     CBlockIndex *pindexDummy = nullptr;
     CBlockIndex *&pindex = ppindex ? *ppindex : pindexDummy;
 
     if (!AcceptBlockHeader(config, block, state, &pindex)) {
         return false;
     }
 
     // Try to process all requested blocks that we don't have, but only
     // process an unrequested block if it's new and has enough work to
     // advance our tip, and isn't too many blocks ahead.
     bool fAlreadyHave = pindex->nStatus & BLOCK_HAVE_DATA;
     bool fHasMoreWork =
         (chainActive.Tip() ? pindex->nChainWork > chainActive.Tip()->nChainWork
                            : true);
     // Blocks that are too out-of-order needlessly limit the effectiveness of
     // pruning, because pruning will not delete block files that contain any
     // blocks which are too close in height to the tip.  Apply this test
     // regardless of whether pruning is enabled; it should generally be safe to
     // not process unrequested blocks.
     bool fTooFarAhead =
         (pindex->nHeight > int(chainActive.Height() + MIN_BLOCKS_TO_KEEP));
 
     // TODO: Decouple this function from the block download logic by removing
     // fRequested. This requires some new chain data structure to efficiently
     // look up if a block is in a chain leading to a candidate for best tip,
     // despite not being such a candidate itself.
 
     // TODO: deal better with return value and error conditions for duplicate
     // and unrequested blocks.
     if (fAlreadyHave) {
         return true;
     }
 
     // If we didn't ask for it:
     if (!fRequested) {
         // This is a previously-processed block that was pruned.
         if (pindex->nTx != 0) {
             return true;
         }
 
         // Don't process less-work chains.
         if (!fHasMoreWork) {
             return true;
         }
 
         // Block height is too high.
         if (fTooFarAhead) {
             return true;
         }
     }
 
     if (fNewBlock) {
         *fNewBlock = true;
     }
 
     const CChainParams &chainparams = config.GetChainParams();
     if (!CheckBlock(config, block, state) ||
         !ContextualCheckBlock(config, block, state, chainparams.GetConsensus(),
                               pindex->pprev)) {
         if (state.IsInvalid() && !state.CorruptionPossible()) {
             pindex->nStatus |= BLOCK_FAILED_VALID;
             setDirtyBlockIndex.insert(pindex);
         }
         return error("%s: %s (block %s)", __func__, FormatStateMessage(state),
                      block.GetHash().ToString());
     }
 
     // Header is valid/has work and the merkle tree is good: relay it now (but
     // if it does not build on our best tip, let the SendMessages loop relay
     // it).
     if (!IsInitialBlockDownload() && chainActive.Tip() == pindex->pprev) {
         GetMainSignals().NewPoWValidBlock(pindex, pblock);
     }
 
     int nHeight = pindex->nHeight;
 
     // Write block to history file
     try {
         unsigned int nBlockSize =
             ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
         CDiskBlockPos blockPos;
         if (dbp != nullptr) {
             blockPos = *dbp;
         }
         if (!FindBlockPos(state, blockPos, nBlockSize + 8, nHeight,
                           block.GetBlockTime(), dbp != nullptr)) {
             return error("AcceptBlock(): FindBlockPos failed");
         }
         if (dbp == nullptr) {
             if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) {
                 AbortNode(state, "Failed to write block");
             }
         }
         if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) {
             return error("AcceptBlock(): ReceivedBlockTransactions failed");
         }
     } catch (const std::runtime_error &e) {
         return AbortNode(state, std::string("System error: ") + e.what());
     }
 
     if (fCheckForPruning) {
         // we just allocated more disk space for block files.
         FlushStateToDisk(state, FLUSH_STATE_NONE);
     }
 
     return true;
 }
 
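 /**
  * Entry point for a block arriving from the network or RPC: run the
  * context-free checks, store the block to disk via AcceptBlock under cs_main,
  * and then attempt to activate the best chain, which is where the block is
  * fully validated and connected.
  */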
 bool ProcessNewBlock(const Config &config,
                      const std::shared_ptr<const CBlock> pblock,
                      bool fForceProcessing, bool *fNewBlock) {
     {
         CBlockIndex *pindex = nullptr;
         if (fNewBlock) *fNewBlock = false;
 
         const CChainParams &chainparams = config.GetChainParams();
 
         CValidationState state;
         // Ensure that CheckBlock() passes before calling AcceptBlock, as
         // belt-and-suspenders.
         bool ret = CheckBlock(config, *pblock, state);
 
         LOCK(cs_main);
 
         if (ret) {
             // Store to disk
             ret = AcceptBlock(config, pblock, state, &pindex, fForceProcessing,
                               nullptr, fNewBlock);
         }
         CheckBlockIndex(chainparams.GetConsensus());
         if (!ret) {
             GetMainSignals().BlockChecked(*pblock, state);
             return error("%s: AcceptBlock FAILED", __func__);
         }
     }
 
     NotifyHeaderTip();
 
     // Only used to report errors, not invalidity - ignore it
     CValidationState state;
     if (!ActivateBestChain(config, state, pblock))
         return error("%s: ActivateBestChain failed", __func__);
 
     return true;
 }
 
 bool TestBlockValidity(const Config &config, CValidationState &state,
                        const CBlock &block, CBlockIndex *pindexPrev,
                        bool fCheckPOW, bool fCheckMerkleRoot) {
     AssertLockHeld(cs_main);
     const CChainParams &chainparams = config.GetChainParams();
 
     assert(pindexPrev && pindexPrev == chainActive.Tip());
     if (fCheckpointsEnabled &&
         !CheckIndexAgainstCheckpoint(pindexPrev, state, chainparams,
                                      block.GetHash())) {
         return error("%s: CheckIndexAgainstCheckpoint(): %s", __func__,
                      state.GetRejectReason().c_str());
     }
 
     CCoinsViewCache viewNew(pcoinsTip);
     CBlockIndex indexDummy(block);
     indexDummy.pprev = pindexPrev;
     indexDummy.nHeight = pindexPrev->nHeight + 1;
 
     // NOTE: CheckBlockHeader is called by CheckBlock
     if (!ContextualCheckBlockHeader(config, block, state, pindexPrev,
                                     GetAdjustedTime())) {
         return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__,
                      FormatStateMessage(state));
     }
     if (!CheckBlock(config, block, state, fCheckPOW, fCheckMerkleRoot)) {
         return error("%s: Consensus::CheckBlock: %s", __func__,
                      FormatStateMessage(state));
     }
     if (!ContextualCheckBlock(config, block, state, chainparams.GetConsensus(),
                               pindexPrev)) {
         return error("%s: Consensus::ContextualCheckBlock: %s", __func__,
                      FormatStateMessage(state));
     }
     if (!ConnectBlock(config, block, state, &indexDummy, viewNew, chainparams,
                       true)) {
         return false;
     }
 
     assert(state.IsValid());
     return true;
 }
 
 /**
  * BLOCK PRUNING CODE
  */
 
 /* Calculate the amount of disk space the block & undo files currently use */
 uint64_t CalculateCurrentUsage() {
     uint64_t retval = 0;
     for (const CBlockFileInfo &file : vinfoBlockFile) {
         retval += file.nSize + file.nUndoSize;
     }
     return retval;
 }
 
 /* Prune a block file (modify associated database entries)*/
 void PruneOneBlockFile(const int fileNumber) {
     for (BlockMap::iterator it = mapBlockIndex.begin();
          it != mapBlockIndex.end(); ++it) {
         CBlockIndex *pindex = it->second;
         if (pindex->nFile == fileNumber) {
             pindex->nStatus &= ~BLOCK_HAVE_DATA;
             pindex->nStatus &= ~BLOCK_HAVE_UNDO;
             pindex->nFile = 0;
             pindex->nDataPos = 0;
             pindex->nUndoPos = 0;
             setDirtyBlockIndex.insert(pindex);
 
             // Prune from mapBlocksUnlinked -- any block we prune would have
             // to be downloaded again in order to consider its chain, at which
             // point it would be considered as a candidate for
             // mapBlocksUnlinked or setBlockIndexCandidates.
             std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                       std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                 range = mapBlocksUnlinked.equal_range(pindex->pprev);
             while (range.first != range.second) {
                 std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it =
                     range.first;
                 range.first++;
                 if (_it->second == pindex) {
                     mapBlocksUnlinked.erase(_it);
                 }
             }
         }
     }
 
     vinfoBlockFile[fileNumber].SetNull();
     setDirtyFileInfo.insert(fileNumber);
 }
 
 void UnlinkPrunedFiles(const std::set<int> &setFilesToPrune) {
     for (std::set<int>::iterator it = setFilesToPrune.begin();
          it != setFilesToPrune.end(); ++it) {
         CDiskBlockPos pos(*it, 0);
         fs::remove(GetBlockPosFilename(pos, "blk"));
         fs::remove(GetBlockPosFilename(pos, "rev"));
         LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
     }
 }
 
 /**
  * Calculate the block/rev files to delete based on height specified by user
  * with RPC command pruneblockchain.
  */
 static void FindFilesToPruneManual(std::set<int> &setFilesToPrune,
                                    int nManualPruneHeight) {
     assert(fPruneMode && nManualPruneHeight > 0);
 
     LOCK2(cs_main, cs_LastBlockFile);
     if (chainActive.Tip() == nullptr) {
         return;
     }
 
     // last block to prune is the lesser of (user-specified height,
     // MIN_BLOCKS_TO_KEEP from the tip)
     unsigned int nLastBlockWeCanPrune =
         std::min((unsigned)nManualPruneHeight,
                  chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP);
     int count = 0;
     for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
         if (vinfoBlockFile[fileNumber].nSize == 0 ||
             vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
             continue;
         }
         PruneOneBlockFile(fileNumber);
         setFilesToPrune.insert(fileNumber);
         count++;
     }
     LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n",
               nLastBlockWeCanPrune, count);
 }
 
 /* This function is called from the RPC code for pruneblockchain */
 void PruneBlockFilesManual(int nManualPruneHeight) {
     CValidationState state;
     FlushStateToDisk(state, FLUSH_STATE_NONE, nManualPruneHeight);
 }
 
 /* Calculate the block/rev files that should be deleted to remain under target*/
 void FindFilesToPrune(std::set<int> &setFilesToPrune,
                       uint64_t nPruneAfterHeight) {
     LOCK2(cs_main, cs_LastBlockFile);
     if (chainActive.Tip() == nullptr || nPruneTarget == 0) {
         return;
     }
     if (uint64_t(chainActive.Tip()->nHeight) <= nPruneAfterHeight) {
         return;
     }
 
     unsigned int nLastBlockWeCanPrune =
         chainActive.Tip()->nHeight - MIN_BLOCKS_TO_KEEP;
     uint64_t nCurrentUsage = CalculateCurrentUsage();
     // We don't check to prune until after we've allocated new space for files,
     // so we should leave a buffer under our target to account for another
     // allocation before the next pruning.
     uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
     uint64_t nBytesToPrune;
     int count = 0;
 
     if (nCurrentUsage + nBuffer >= nPruneTarget) {
         for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
             nBytesToPrune = vinfoBlockFile[fileNumber].nSize +
                             vinfoBlockFile[fileNumber].nUndoSize;
 
             if (vinfoBlockFile[fileNumber].nSize == 0) {
                 continue;
             }
 
             // are we below our target?
             if (nCurrentUsage + nBuffer < nPruneTarget) {
                 break;
             }
 
             // don't prune files that could have a block within
             // MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
             if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
                 continue;
             }
 
             PruneOneBlockFile(fileNumber);
             // Queue up the files for removal
             setFilesToPrune.insert(fileNumber);
             nCurrentUsage -= nBytesToPrune;
             count++;
         }
     }
 
     LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB "
                            "max_prune_height=%d removed %d blk/rev pairs\n",
              nPruneTarget / 1024 / 1024, nCurrentUsage / 1024 / 1024,
              ((int64_t)nPruneTarget - (int64_t)nCurrentUsage) / 1024 / 1024,
              nLastBlockWeCanPrune, count);
 }
 
 bool CheckDiskSpace(uint64_t nAdditionalBytes) {
     uint64_t nFreeBytesAvailable = fs::space(GetDataDir()).available;
 
     // Check for nMinDiskSpace bytes (currently 50MB)
     if (nFreeBytesAvailable < nMinDiskSpace + nAdditionalBytes)
         return AbortNode("Disk space is low!", _("Error: Disk space is low!"));
 
     return true;
 }
 
 FILE *OpenDiskFile(const CDiskBlockPos &pos, const char *prefix,
                    bool fReadOnly) {
     if (pos.IsNull()) return nullptr;
     fs::path path = GetBlockPosFilename(pos, prefix);
     fs::create_directories(path.parent_path());
     FILE *file = fsbridge::fopen(path, "rb+");
     if (!file && !fReadOnly) {
         file = fsbridge::fopen(path, "wb+");
     }
     if (!file) {
         LogPrintf("Unable to open file %s\n", path.string());
         return nullptr;
     }
     if (pos.nPos) {
         if (fseek(file, pos.nPos, SEEK_SET)) {
             LogPrintf("Unable to seek to position %u of %s\n", pos.nPos,
                       path.string());
             fclose(file);
             return nullptr;
         }
     }
     return file;
 }
 
 FILE *OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
     return OpenDiskFile(pos, "blk", fReadOnly);
 }
 
 FILE *OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
     return OpenDiskFile(pos, "rev", fReadOnly);
 }
 
 fs::path GetBlockPosFilename(const CDiskBlockPos &pos, const char *prefix) {
     return GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
 }
 
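 /**
  * Find or create the CBlockIndex entry for the given hash while loading the
  * block index from disk; returns nullptr for the null hash.
  */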
 CBlockIndex *InsertBlockIndex(uint256 hash) {
     if (hash.IsNull()) return nullptr;
 
     // Return existing
     BlockMap::iterator mi = mapBlockIndex.find(hash);
     if (mi != mapBlockIndex.end()) return (*mi).second;
 
     // Create new
     CBlockIndex *pindexNew = new CBlockIndex();
     if (!pindexNew)
         throw std::runtime_error(std::string(__func__) +
                                  ": new CBlockIndex failed");
     mi = mapBlockIndex.insert(std::make_pair(hash, pindexNew)).first;
     pindexNew->phashBlock = &((*mi).first);
 
     return pindexNew;
 }
 
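 /**
  * Rebuild the in-memory block index from the block tree database: recompute
  * chain work and nChainTx, repopulate setBlockIndexCandidates and
  * mapBlocksUnlinked, load the block file info, and restore the active chain
  * tip from the coins database's best block.
  */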
 static bool LoadBlockIndexDB(const CChainParams &chainparams) {
     if (!pblocktree->LoadBlockIndexGuts(InsertBlockIndex)) return false;
 
     boost::this_thread::interruption_point();
 
     // Calculate nChainWork
     std::vector<std::pair<int, CBlockIndex *>> vSortedByHeight;
     vSortedByHeight.reserve(mapBlockIndex.size());
     for (const std::pair<uint256, CBlockIndex *> &item : mapBlockIndex) {
         CBlockIndex *pindex = item.second;
         vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
     }
     sort(vSortedByHeight.begin(), vSortedByHeight.end());
     for (const std::pair<int, CBlockIndex *> &item : vSortedByHeight) {
         CBlockIndex *pindex = item.second;
         pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) +
                              GetBlockProof(*pindex);
         pindex->nTimeMax =
             (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime)
                            : pindex->nTime);
         // We can link the chain of blocks for which we've received transactions
         // at some point. Pruned nodes may have deleted the block.
         if (pindex->nTx > 0) {
             if (pindex->pprev) {
                 if (pindex->pprev->nChainTx) {
                     pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
                 } else {
                     pindex->nChainTx = 0;
                     mapBlocksUnlinked.insert(
                         std::make_pair(pindex->pprev, pindex));
                 }
             } else {
                 pindex->nChainTx = pindex->nTx;
             }
         }
         if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
             (pindex->nChainTx || pindex->pprev == nullptr)) {
             setBlockIndexCandidates.insert(pindex);
         }
         if (pindex->nStatus & BLOCK_FAILED_MASK &&
             (!pindexBestInvalid ||
              pindex->nChainWork > pindexBestInvalid->nChainWork)) {
             pindexBestInvalid = pindex;
         }
         if (pindex->pprev) {
             pindex->BuildSkip();
         }
         if (pindex->IsValid(BLOCK_VALID_TREE) &&
             (pindexBestHeader == nullptr ||
              CBlockIndexWorkComparator()(pindexBestHeader, pindex))) {
             pindexBestHeader = pindex;
         }
     }
 
     // Load block file info
     pblocktree->ReadLastBlockFile(nLastBlockFile);
     vinfoBlockFile.resize(nLastBlockFile + 1);
     LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
     for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
         pblocktree->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
     }
     LogPrintf("%s: last block file info: %s\n", __func__,
               vinfoBlockFile[nLastBlockFile].ToString());
     for (int nFile = nLastBlockFile + 1; true; nFile++) {
         CBlockFileInfo info;
         if (pblocktree->ReadBlockFileInfo(nFile, info)) {
             vinfoBlockFile.push_back(info);
         } else {
             break;
         }
     }
 
     // Check presence of blk files
     LogPrintf("Checking all blk files are present...\n");
     std::set<int> setBlkDataFiles;
     for (const std::pair<uint256, CBlockIndex *> &item : mapBlockIndex) {
         CBlockIndex *pindex = item.second;
         if (pindex->nStatus & BLOCK_HAVE_DATA) {
             setBlkDataFiles.insert(pindex->nFile);
         }
     }
     for (std::set<int>::iterator it = setBlkDataFiles.begin();
          it != setBlkDataFiles.end(); it++) {
         CDiskBlockPos pos(*it, 0);
         if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION)
                 .IsNull()) {
             return false;
         }
     }
 
     // Check whether we have ever pruned block & undo files
     pblocktree->ReadFlag("prunedblockfiles", fHavePruned);
     if (fHavePruned) {
         LogPrintf(
             "LoadBlockIndexDB(): Block files have previously been pruned\n");
     }
 
     // Check whether we need to continue reindexing
     bool fReindexing = false;
     pblocktree->ReadReindexing(fReindexing);
     fReindex |= fReindexing;
 
     // Check whether we have a transaction index
     pblocktree->ReadFlag("txindex", fTxIndex);
     LogPrintf("%s: transaction index %s\n", __func__,
               fTxIndex ? "enabled" : "disabled");
 
     // Load pointer to end of best chain
     BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock());
     if (it == mapBlockIndex.end()) {
         return true;
     }
     chainActive.SetTip(it->second);
 
     PruneBlockIndexCandidates();
 
     LogPrintf(
         "%s: hashBestChain=%s height=%d date=%s progress=%f\n", __func__,
         chainActive.Tip()->GetBlockHash().ToString(), chainActive.Height(),
         DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
                           chainActive.Tip()->GetBlockTime()),
         GuessVerificationProgress(chainparams.TxData(), chainActive.Tip()));
 
     return true;
 }
 
 CVerifyDB::CVerifyDB() {
     uiInterface.ShowProgress(_("Verifying blocks..."), 0);
 }
 
 CVerifyDB::~CVerifyDB() {
     uiInterface.ShowProgress("", 100);
 }
 
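 /**
  * Verify the last nCheckDepth blocks of the active chain at the requested
  * nCheckLevel (0-4): re-read each block from disk, re-run CheckBlock, check
  * the undo data, disconnect the tip blocks in a memory-only coins view and,
  * at level 4, reconnect them.
  */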
 bool CVerifyDB::VerifyDB(const Config &config, CCoinsView *coinsview,
                          int nCheckLevel, int nCheckDepth) {
     LOCK(cs_main);
     if (chainActive.Tip() == nullptr || chainActive.Tip()->pprev == nullptr) {
         return true;
     }
 
     // Verify blocks in the best chain
     if (nCheckDepth <= 0) {
         // suffices until the year 19000
         nCheckDepth = 1000000000;
     }
 
     if (nCheckDepth > chainActive.Height()) {
         nCheckDepth = chainActive.Height();
     }
 
     nCheckLevel = std::max(0, std::min(4, nCheckLevel));
     LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth,
               nCheckLevel);
 
     const CChainParams &chainparams = config.GetChainParams();
 
     CCoinsViewCache coins(coinsview);
     CBlockIndex *pindexState = chainActive.Tip();
     CBlockIndex *pindexFailure = nullptr;
     int nGoodTransactions = 0;
     CValidationState state;
     int reportDone = 0;
     LogPrintf("[0%%]...");
     for (CBlockIndex *pindex = chainActive.Tip(); pindex && pindex->pprev;
          pindex = pindex->pprev) {
         boost::this_thread::interruption_point();
         int percentageDone = std::max(
             1, std::min(
                    99,
                    (int)(((double)(chainActive.Height() - pindex->nHeight)) /
                          (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
 
         if (reportDone < percentageDone / 10) {
             // report every 10% step
             LogPrintf("[%d%%]...", percentageDone);
             reportDone = percentageDone / 10;
         }
 
         uiInterface.ShowProgress(_("Verifying blocks..."), percentageDone);
         if (pindex->nHeight < chainActive.Height() - nCheckDepth) {
             break;
         }
 
         if (fPruneMode && !(pindex->nStatus & BLOCK_HAVE_DATA)) {
             // If pruning, only go back as far as we have data.
             LogPrintf("VerifyDB(): block verification stopping at height %d "
                       "(pruning, no data)\n",
                       pindex->nHeight);
             break;
         }
 
         CBlock block;
 
         // check level 0: read from disk
         if (!ReadBlockFromDisk(block, pindex, config)) {
             return error(
                 "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
                 pindex->nHeight, pindex->GetBlockHash().ToString());
         }
 
         // check level 1: verify block validity
         if (nCheckLevel >= 1 && !CheckBlock(config, block, state)) {
             return error("%s: *** found bad block at %d, hash=%s (%s)\n",
                          __func__, pindex->nHeight,
                          pindex->GetBlockHash().ToString(),
                          FormatStateMessage(state));
         }
 
         // check level 2: verify undo validity
         if (nCheckLevel >= 2 && pindex) {
             CBlockUndo undo;
             CDiskBlockPos pos = pindex->GetUndoPos();
             if (!pos.IsNull()) {
                 if (!UndoReadFromDisk(undo, pos,
                                       pindex->pprev->GetBlockHash())) {
                     return error(
                         "VerifyDB(): *** found bad undo data at %d, hash=%s\n",
                         pindex->nHeight, pindex->GetBlockHash().ToString());
                 }
             }
         }
 
         // check level 3: check for inconsistencies during memory-only
         // disconnect of tip blocks
         if (nCheckLevel >= 3 && pindex == pindexState &&
             (coins.DynamicMemoryUsage() + pcoinsTip->DynamicMemoryUsage()) <=
                 nCoinCacheUsage) {
             DisconnectResult res = DisconnectBlock(block, pindex, coins);
             if (res == DISCONNECT_FAILED) {
                 return error("VerifyDB(): *** irrecoverable inconsistency in "
                              "block data at %d, hash=%s",
                              pindex->nHeight,
                              pindex->GetBlockHash().ToString());
             }
             pindexState = pindex->pprev;
             if (res == DISCONNECT_UNCLEAN) {
                 nGoodTransactions = 0;
                 pindexFailure = pindex;
             } else {
                 nGoodTransactions += block.vtx.size();
             }
         }
 
         if (ShutdownRequested()) {
             return true;
         }
     }
 
     if (pindexFailure) {
         return error("VerifyDB(): *** coin database inconsistencies found "
                      "(last %i blocks, %i good transactions before that)\n",
                      chainActive.Height() - pindexFailure->nHeight + 1,
                      nGoodTransactions);
     }
 
     // check level 4: try reconnecting blocks
     if (nCheckLevel >= 4) {
         CBlockIndex *pindex = pindexState;
         while (pindex != chainActive.Tip()) {
             boost::this_thread::interruption_point();
             uiInterface.ShowProgress(
                 _("Verifying blocks..."),
                 std::max(1,
                          std::min(99,
                                   100 - (int)(((double)(chainActive.Height() -
                                                         pindex->nHeight)) /
                                               (double)nCheckDepth * 50))));
             pindex = chainActive.Next(pindex);
             CBlock block;
             if (!ReadBlockFromDisk(block, pindex, config)) {
                 return error(
                     "VerifyDB(): *** ReadBlockFromDisk failed at %d, hash=%s",
                     pindex->nHeight, pindex->GetBlockHash().ToString());
             }
             if (!ConnectBlock(config, block, state, pindex, coins,
                               chainparams)) {
                 return error(
                     "VerifyDB(): *** found unconnectable block at %d, hash=%s",
                     pindex->nHeight, pindex->GetBlockHash().ToString());
             }
         }
     }
 
     LogPrintf("[DONE].\n");
     LogPrintf("No coin database inconsistencies in last %i blocks (%i "
               "transactions)\n",
               chainActive.Height() - pindexState->nHeight, nGoodTransactions);
 
     return true;
 }
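
 // Illustrative sketch only (not part of this patch): VerifyDB() is driven at
 // init time from the -checklevel / -checkblocks options. The call-site shape
 // below is an assumption about init.cpp, shown here purely for orientation.
 //
 //   if (!CVerifyDB().VerifyDB(config, pcoinsdbview,
 //                             GetArg("-checklevel", DEFAULT_CHECKLEVEL),
 //                             GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) {
 //       return InitError(_("Corrupted block database detected"));
 //   }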
 
 bool RewindBlockIndex(const Config &config) {
     LOCK(cs_main);
 
     int nHeight = chainActive.Height() + 1;
 
     // nHeight is now the height of the first insufficiently-validated block, or
     // tipheight + 1
     CValidationState state;
     CBlockIndex *pindex = chainActive.Tip();
     while (chainActive.Height() >= nHeight) {
         if (fPruneMode && !(chainActive.Tip()->nStatus & BLOCK_HAVE_DATA)) {
             // If pruning, don't try rewinding past the HAVE_DATA point; since
             // older blocks can't be served anyway, there's no need to walk
             // further, and trying to DisconnectTip() will fail (and require a
             // needless reindex/redownload of the blockchain).
             break;
         }
         if (!DisconnectTip(config, state, true)) {
             return error(
                 "RewindBlockIndex: unable to disconnect block at height %i",
                 pindex->nHeight);
         }
         // Occasionally flush state to disk.
         if (!FlushStateToDisk(state, FLUSH_STATE_PERIODIC)) {
             return false;
         }
     }
 
     // Reduce validity flag and have-data flags.
     // We do this after actual disconnecting, otherwise we'll end up writing the
     // lack of data to disk before writing the chainstate, resulting in a
     // failure to continue if interrupted.
     for (BlockMap::iterator it = mapBlockIndex.begin();
          it != mapBlockIndex.end(); it++) {
         CBlockIndex *pindexIter = it->second;
 
         if (pindexIter->IsValid(BLOCK_VALID_TRANSACTIONS) &&
             pindexIter->nChainTx) {
             setBlockIndexCandidates.insert(pindexIter);
         }
     }
 
     PruneBlockIndexCandidates();
 
     CheckBlockIndex(config.GetChainParams().GetConsensus());
 
     if (!FlushStateToDisk(state, FLUSH_STATE_ALWAYS)) {
         return false;
     }
 
     return true;
 }
 
 // May NOT be used after any connections are up, as much of the
 // peer-processing logic assumes a consistent block index state.
 void UnloadBlockIndex() {
     LOCK(cs_main);
     setBlockIndexCandidates.clear();
     chainActive.SetTip(nullptr);
     pindexBestInvalid = nullptr;
     pindexBestHeader = nullptr;
     mempool.clear();
     mapBlocksUnlinked.clear();
     vinfoBlockFile.clear();
     nLastBlockFile = 0;
     nBlockSequenceId = 1;
     setDirtyBlockIndex.clear();
     setDirtyFileInfo.clear();
     versionbitscache.Clear();
     for (int b = 0; b < VERSIONBITS_NUM_BITS; b++) {
         warningcache[b].clear();
     }
 
     for (BlockMap::value_type &entry : mapBlockIndex) {
         delete entry.second;
     }
     mapBlockIndex.clear();
     fHavePruned = false;
 }
 
 bool LoadBlockIndex(const CChainParams &chainparams) {
     // Load block index from databases
     if (!fReindex && !LoadBlockIndexDB(chainparams)) {
         return false;
     }
     return true;
 }
 
 bool InitBlockIndex(const Config &config) {
     LOCK(cs_main);
 
     // Check whether we're already initialized
     if (chainActive.Genesis() != nullptr) {
         return true;
     }
 
     // Use the provided setting for -txindex in the new database
     fTxIndex = GetBoolArg("-txindex", DEFAULT_TXINDEX);
     pblocktree->WriteFlag("txindex", fTxIndex);
     LogPrintf("Initializing databases...\n");
 
     // Only add the genesis block if not reindexing (in which case we reuse the
     // one already on disk)
     if (!fReindex) {
         try {
             const CChainParams &chainparams = config.GetChainParams();
             CBlock &block = const_cast<CBlock &>(chainparams.GenesisBlock());
             // Start new block file
             unsigned int nBlockSize =
                 ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
             CDiskBlockPos blockPos;
             CValidationState state;
             if (!FindBlockPos(state, blockPos, nBlockSize + 8, 0,
                               block.GetBlockTime())) {
                 return error("LoadBlockIndex(): FindBlockPos failed");
             }
             if (!WriteBlockToDisk(block, blockPos, chainparams.DiskMagic())) {
                 return error(
                     "LoadBlockIndex(): writing genesis block to disk failed");
             }
             CBlockIndex *pindex = AddToBlockIndex(block);
             if (!ReceivedBlockTransactions(block, state, pindex, blockPos)) {
                 return error("LoadBlockIndex(): genesis block not accepted");
             }
             // Force a chainstate write so that when we VerifyDB in a moment, it
             // doesn't check stale data
             return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
         } catch (const std::runtime_error &e) {
             return error(
                 "LoadBlockIndex(): failed to initialize block database: %s",
                 e.what());
         }
     }
 
     return true;
 }
 
 bool LoadExternalBlockFile(const Config &config, FILE *fileIn,
                            CDiskBlockPos *dbp) {
     // Map of disk positions for blocks with unknown parent (only used for
     // reindex)
     static std::multimap<uint256, CDiskBlockPos> mapBlocksUnknownParent;
     int64_t nStart = GetTimeMillis();
 
     const CChainParams &chainparams = config.GetChainParams();
 
     int nLoaded = 0;
     try {
         // This takes over fileIn and calls fclose() on it in the CBufferedFile
         // destructor. Make sure we have at least 2*MAX_TX_SIZE space in there
         // so any transaction can fit in the buffer.
         CBufferedFile blkdat(fileIn, 2 * MAX_TX_SIZE, MAX_TX_SIZE + 8, SER_DISK,
                              CLIENT_VERSION);
         uint64_t nRewind = blkdat.GetPos();
         while (!blkdat.eof()) {
             boost::this_thread::interruption_point();
 
             blkdat.SetPos(nRewind);
             // Start one byte further next time, in case of failure.
             nRewind++;
             // Remove former limit.
             blkdat.SetLimit();
             unsigned int nSize = 0;
             try {
                 // Locate a header.
                 uint8_t buf[CMessageHeader::MESSAGE_START_SIZE];
                 blkdat.FindByte(chainparams.DiskMagic()[0]);
                 nRewind = blkdat.GetPos() + 1;
                 blkdat >> FLATDATA(buf);
                 if (memcmp(buf, std::begin(chainparams.DiskMagic()),
                            CMessageHeader::MESSAGE_START_SIZE)) {
                     continue;
                 }
                 // Read size.
                 blkdat >> nSize;
                 if (nSize < 80) {
                     continue;
                 }
             } catch (const std::exception &) {
                 // No valid block header found; don't complain.
                 break;
             }
             try {
                 // read block
                 uint64_t nBlockPos = blkdat.GetPos();
                 if (dbp) {
                     dbp->nPos = nBlockPos;
                 }
                 blkdat.SetLimit(nBlockPos + nSize);
                 blkdat.SetPos(nBlockPos);
                 std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
                 CBlock &block = *pblock;
                 blkdat >> block;
                 nRewind = blkdat.GetPos();
 
                 // detect out of order blocks, and store them for later
                 uint256 hash = block.GetHash();
                 if (hash != chainparams.GetConsensus().hashGenesisBlock &&
                     mapBlockIndex.find(block.hashPrevBlock) ==
                         mapBlockIndex.end()) {
                     LogPrint(BCLog::REINDEX,
                              "%s: Out of order block %s, parent %s not known\n",
                              __func__, hash.ToString(),
                              block.hashPrevBlock.ToString());
                     if (dbp) {
                         mapBlocksUnknownParent.insert(
                             std::make_pair(block.hashPrevBlock, *dbp));
                     }
                     continue;
                 }
 
                 // process in case the block isn't known yet
                 if (mapBlockIndex.count(hash) == 0 ||
                     (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
                     LOCK(cs_main);
                     CValidationState state;
                     if (AcceptBlock(config, pblock, state, nullptr, true, dbp,
                                     nullptr)) {
                         nLoaded++;
                     }
                     if (state.IsError()) {
                         break;
                     }
                 } else if (hash !=
                                chainparams.GetConsensus().hashGenesisBlock &&
                            mapBlockIndex[hash]->nHeight % 1000 == 0) {
                     LogPrint(
                         BCLog::REINDEX,
                         "Block Import: already had block %s at height %d\n",
                         hash.ToString(), mapBlockIndex[hash]->nHeight);
                 }
 
                 // Activate the genesis block so normal node progress can
                 // continue
                 if (hash == chainparams.GetConsensus().hashGenesisBlock) {
                     CValidationState state;
                     if (!ActivateBestChain(config, state)) {
                         break;
                     }
                 }
 
                 NotifyHeaderTip();
 
                 // Recursively process earlier encountered successors of this
                 // block
                 std::deque<uint256> queue;
                 queue.push_back(hash);
                 while (!queue.empty()) {
                     uint256 head = queue.front();
                     queue.pop_front();
                     std::pair<std::multimap<uint256, CDiskBlockPos>::iterator,
                               std::multimap<uint256, CDiskBlockPos>::iterator>
                         range = mapBlocksUnknownParent.equal_range(head);
                     while (range.first != range.second) {
                         std::multimap<uint256, CDiskBlockPos>::iterator it =
                             range.first;
                         std::shared_ptr<CBlock> pblockrecursive =
                             std::make_shared<CBlock>();
                         if (ReadBlockFromDisk(*pblockrecursive, it->second,
                                               config)) {
                             LogPrint(
                                 BCLog::REINDEX,
                                 "%s: Processing out of order child %s of %s\n",
                                 __func__, pblockrecursive->GetHash().ToString(),
                                 head.ToString());
                             LOCK(cs_main);
                             CValidationState dummy;
                             if (AcceptBlock(config, pblockrecursive, dummy,
                                             nullptr, true, &it->second,
                                             nullptr)) {
                                 nLoaded++;
                                 queue.push_back(pblockrecursive->GetHash());
                             }
                         }
                         range.first++;
                         mapBlocksUnknownParent.erase(it);
                         NotifyHeaderTip();
                     }
                 }
             } catch (const std::exception &e) {
                 LogPrintf("%s: Deserialize or I/O error - %s\n", __func__,
                           e.what());
             }
         }
     } catch (const std::runtime_error &e) {
         AbortNode(std::string("System error: ") + e.what());
     }
     if (nLoaded > 0) {
         LogPrintf("Loaded %i blocks from external file in %dms\n", nLoaded,
                   GetTimeMillis() - nStart);
     }
     return nLoaded > 0;
 }
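
 // Descriptive note: the loader above is exercised by -reindex (replaying the
 // node's own blk*.dat files) and by -loadblock=<file>; to the best of my
 // knowledge both paths funnel through ThreadImport in init.cpp, which is an
 // assumption about the caller rather than something shown in this patch.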
 
 static void CheckBlockIndex(const Consensus::Params &consensusParams) {
     if (!fCheckBlockIndex) {
         return;
     }
 
     LOCK(cs_main);
 
     // During a reindex, we read the genesis block and call CheckBlockIndex
     // before ActivateBestChain, so we have the genesis block in mapBlockIndex
     // but no active chain. (A few of the tests when iterating the block tree
     // require that chainActive has been initialized.)
     if (chainActive.Height() < 0) {
         assert(mapBlockIndex.size() <= 1);
         return;
     }
 
     // Build forward-pointing map of the entire block tree.
     std::multimap<CBlockIndex *, CBlockIndex *> forward;
     for (BlockMap::iterator it = mapBlockIndex.begin();
          it != mapBlockIndex.end(); it++) {
         forward.insert(std::make_pair(it->second->pprev, it->second));
     }
 
     assert(forward.size() == mapBlockIndex.size());
 
     std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
               std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
         rangeGenesis = forward.equal_range(nullptr);
     CBlockIndex *pindex = rangeGenesis.first->second;
     rangeGenesis.first++;
     // There is only one index entry with parent nullptr.
     assert(rangeGenesis.first == rangeGenesis.second);
 
     // Iterate over the entire block tree, using depth-first search.
     // Along the way, remember whether there are blocks on the path from genesis
     // block being explored which are the first to have certain properties.
     size_t nNodes = 0;
     int nHeight = 0;
     // Oldest ancestor of pindex which is invalid.
     CBlockIndex *pindexFirstInvalid = nullptr;
     // Oldest ancestor of pindex which does not have BLOCK_HAVE_DATA.
     CBlockIndex *pindexFirstMissing = nullptr;
     // Oldest ancestor of pindex for which nTx == 0.
     CBlockIndex *pindexFirstNeverProcessed = nullptr;
     // Oldest ancestor of pindex which does not have BLOCK_VALID_TREE
     // (regardless of being valid or not).
     CBlockIndex *pindexFirstNotTreeValid = nullptr;
     // Oldest ancestor of pindex which does not have BLOCK_VALID_TRANSACTIONS
     // (regardless of being valid or not).
     CBlockIndex *pindexFirstNotTransactionsValid = nullptr;
     // Oldest ancestor of pindex which does not have BLOCK_VALID_CHAIN
     // (regardless of being valid or not).
     CBlockIndex *pindexFirstNotChainValid = nullptr;
     // Oldest ancestor of pindex which does not have BLOCK_VALID_SCRIPTS
     // (regardless of being valid or not).
     CBlockIndex *pindexFirstNotScriptsValid = nullptr;
     while (pindex != nullptr) {
         nNodes++;
         if (pindexFirstInvalid == nullptr &&
             pindex->nStatus & BLOCK_FAILED_VALID) {
             pindexFirstInvalid = pindex;
         }
         if (pindexFirstMissing == nullptr &&
             !(pindex->nStatus & BLOCK_HAVE_DATA)) {
             pindexFirstMissing = pindex;
         }
         if (pindexFirstNeverProcessed == nullptr && pindex->nTx == 0) {
             pindexFirstNeverProcessed = pindex;
         }
         if (pindex->pprev != nullptr && pindexFirstNotTreeValid == nullptr &&
             (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TREE) {
             pindexFirstNotTreeValid = pindex;
         }
         if (pindex->pprev != nullptr &&
             pindexFirstNotTransactionsValid == nullptr &&
             (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_TRANSACTIONS) {
             pindexFirstNotTransactionsValid = pindex;
         }
         if (pindex->pprev != nullptr && pindexFirstNotChainValid == nullptr &&
             (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_CHAIN) {
             pindexFirstNotChainValid = pindex;
         }
         if (pindex->pprev != nullptr && pindexFirstNotScriptsValid == nullptr &&
             (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS) {
             pindexFirstNotScriptsValid = pindex;
         }
 
         // Begin: actual consistency checks.
         if (pindex->pprev == nullptr) {
             // Genesis block checks.
             // Genesis block's hash must match.
             assert(pindex->GetBlockHash() == consensusParams.hashGenesisBlock);
             // The current active chain's genesis block must be this block.
             assert(pindex == chainActive.Genesis());
         }
         if (pindex->nChainTx == 0) {
             // nSequenceId can't be set positive for blocks that aren't linked
             // (negative is used for preciousblock)
             assert(pindex->nSequenceId <= 0);
         }
         // VALID_TRANSACTIONS is equivalent to nTx > 0 for all nodes (whether or
         // not pruning has occurred). HAVE_DATA is only equivalent to nTx > 0
         // (or VALID_TRANSACTIONS) if no pruning has occurred.
         if (!fHavePruned) {
             // If we've never pruned, then HAVE_DATA should be equivalent to
             // nTx > 0.
             assert(!(pindex->nStatus & BLOCK_HAVE_DATA) == (pindex->nTx == 0));
             assert(pindexFirstMissing == pindexFirstNeverProcessed);
         } else {
             // If we have pruned, then we can only say that HAVE_DATA implies
             // nTx > 0
             if (pindex->nStatus & BLOCK_HAVE_DATA) assert(pindex->nTx > 0);
         }
         if (pindex->nStatus & BLOCK_HAVE_UNDO) {
             assert(pindex->nStatus & BLOCK_HAVE_DATA);
         }
         // This is pruning-independent.
         assert(((pindex->nStatus & BLOCK_VALID_MASK) >=
                 BLOCK_VALID_TRANSACTIONS) == (pindex->nTx > 0));
         // All parents having had data (at some point) is equivalent to all
         // parents being VALID_TRANSACTIONS, which is equivalent to nChainTx
         // being set.
         // nChainTx != 0 is used to signal that all parent blocks have been
         // processed (but may have been pruned).
         assert((pindexFirstNeverProcessed != nullptr) ==
                (pindex->nChainTx == 0));
         assert((pindexFirstNotTransactionsValid != nullptr) ==
                (pindex->nChainTx == 0));
         // nHeight must be consistent.
         assert(pindex->nHeight == nHeight);
         // For every block except the genesis block, the chainwork must be
         // larger than the parent's.
         assert(pindex->pprev == nullptr ||
                pindex->nChainWork >= pindex->pprev->nChainWork);
         // The pskip pointer must point back for all but the first 2 blocks.
         assert(nHeight < 2 ||
                (pindex->pskip && (pindex->pskip->nHeight < nHeight)));
         // All mapBlockIndex entries must at least be TREE valid
         assert(pindexFirstNotTreeValid == nullptr);
         if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TREE) {
             // TREE valid implies all parents are TREE valid
             assert(pindexFirstNotTreeValid == nullptr);
         }
         if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_CHAIN) {
             // CHAIN valid implies all parents are CHAIN valid
             assert(pindexFirstNotChainValid == nullptr);
         }
         if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_SCRIPTS) {
             // SCRIPTS valid implies all parents are SCRIPTS valid
             assert(pindexFirstNotScriptsValid == nullptr);
         }
         if (pindexFirstInvalid == nullptr) {
             // Checks for not-invalid blocks.
             // The failed mask cannot be set for blocks without invalid parents.
             assert((pindex->nStatus & BLOCK_FAILED_MASK) == 0);
         }
         if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) &&
             pindexFirstNeverProcessed == nullptr) {
             if (pindexFirstInvalid == nullptr) {
                 // If this block sorts at least as good as the current tip and
                 // is valid and we have all data for its parents, it must be in
                 // setBlockIndexCandidates. chainActive.Tip() must also be there
                 // even if some data has been pruned.
                 if (pindexFirstMissing == nullptr ||
                     pindex == chainActive.Tip()) {
                     assert(setBlockIndexCandidates.count(pindex));
                 }
                 // If some parent is missing, then it could be that this block
                 // was in setBlockIndexCandidates but had to be removed because
                 // of the missing data. In this case it must be in
                 // mapBlocksUnlinked -- see test below.
             }
         } else {
             // If this block sorts worse than the current tip or some ancestor's
             // block has never been seen, it cannot be in
             // setBlockIndexCandidates.
             assert(setBlockIndexCandidates.count(pindex) == 0);
         }
         // Check whether this block is in mapBlocksUnlinked.
         std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                   std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
             rangeUnlinked = mapBlocksUnlinked.equal_range(pindex->pprev);
         bool foundInUnlinked = false;
         while (rangeUnlinked.first != rangeUnlinked.second) {
             assert(rangeUnlinked.first->first == pindex->pprev);
             if (rangeUnlinked.first->second == pindex) {
                 foundInUnlinked = true;
                 break;
             }
             rangeUnlinked.first++;
         }
         if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) &&
             pindexFirstNeverProcessed != nullptr &&
             pindexFirstInvalid == nullptr) {
             // If this block has block data available, some parent was never
             // received, and has no invalid parents, it must be in
             // mapBlocksUnlinked.
             assert(foundInUnlinked);
         }
         if (!(pindex->nStatus & BLOCK_HAVE_DATA)) {
             // Can't be in mapBlocksUnlinked if we don't HAVE_DATA
             assert(!foundInUnlinked);
         }
         if (pindexFirstMissing == nullptr) {
             // We aren't missing data for any parent -- cannot be in
             // mapBlocksUnlinked.
             assert(!foundInUnlinked);
         }
         if (pindex->pprev && (pindex->nStatus & BLOCK_HAVE_DATA) &&
             pindexFirstNeverProcessed == nullptr &&
             pindexFirstMissing != nullptr) {
             // We HAVE_DATA for this block, have received data for all parents
             // at some point, but we're currently missing data for some parent.
             // We must have pruned.
             assert(fHavePruned);
             // This block may have entered mapBlocksUnlinked if:
             //  - it has a descendant that at some point had more work than the
             //    tip, and
             //  - we tried switching to that descendant but were missing
             //    data for some intermediate block between chainActive and the
             //    tip.
             // So if this block is itself better than chainActive.Tip() and it
             // wasn't in setBlockIndexCandidates, then it must be in
             // mapBlocksUnlinked.
             if (!CBlockIndexWorkComparator()(pindex, chainActive.Tip()) &&
                 setBlockIndexCandidates.count(pindex) == 0) {
                 if (pindexFirstInvalid == nullptr) {
                     assert(foundInUnlinked);
                 }
             }
         }
         // assert(pindex->GetBlockHash() == pindex->GetBlockHeader().GetHash());
         // // Perhaps too slow
         // End: actual consistency checks.
 
         // Try descending into the first subnode.
         std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                   std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
             range = forward.equal_range(pindex);
         if (range.first != range.second) {
             // A subnode was found.
             pindex = range.first->second;
             nHeight++;
             continue;
         }
         // This is a leaf node. Move upwards until we reach a node of which we
         // have not yet visited the last child.
         while (pindex) {
             // We are going to either move to a parent or a sibling of pindex.
             // If pindex was the first with a certain property, unset the
             // corresponding variable.
             if (pindex == pindexFirstInvalid) {
                 pindexFirstInvalid = nullptr;
             }
             if (pindex == pindexFirstMissing) {
                 pindexFirstMissing = nullptr;
             }
             if (pindex == pindexFirstNeverProcessed) {
                 pindexFirstNeverProcessed = nullptr;
             }
             if (pindex == pindexFirstNotTreeValid) {
                 pindexFirstNotTreeValid = nullptr;
             }
             if (pindex == pindexFirstNotTransactionsValid) {
                 pindexFirstNotTransactionsValid = nullptr;
             }
             if (pindex == pindexFirstNotChainValid) {
                 pindexFirstNotChainValid = nullptr;
             }
             if (pindex == pindexFirstNotScriptsValid) {
                 pindexFirstNotScriptsValid = nullptr;
             }
             // Find our parent.
             CBlockIndex *pindexPar = pindex->pprev;
             // Find which child we just visited.
             std::pair<std::multimap<CBlockIndex *, CBlockIndex *>::iterator,
                       std::multimap<CBlockIndex *, CBlockIndex *>::iterator>
                 rangePar = forward.equal_range(pindexPar);
             while (rangePar.first->second != pindex) {
                 // Our parent must have at least the node we're coming from as
                 // child.
                 assert(rangePar.first != rangePar.second);
                 rangePar.first++;
             }
             // Proceed to the next one.
             rangePar.first++;
             if (rangePar.first != rangePar.second) {
                 // Move to the sibling.
                 pindex = rangePar.first->second;
                 break;
             } else {
                 // Move up further.
                 pindex = pindexPar;
                 nHeight--;
                 continue;
             }
         }
     }
 
     // Check that we actually traversed the entire map.
     assert(nNodes == forward.size());
 }
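
 // Descriptive note: the full-tree walk above only runs when fCheckBlockIndex
 // is set, normally via -checkblockindex; as far as I recall it defaults to on
 // only for chains with fDefaultConsistencyChecks (regtest), so mainnet nodes
 // skip it unless explicitly requested.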
 
 std::string CBlockFileInfo::ToString() const {
     return strprintf(
         "CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)",
         nBlocks, nSize, nHeightFirst, nHeightLast,
         DateTimeStrFormat("%Y-%m-%d", nTimeFirst),
         DateTimeStrFormat("%Y-%m-%d", nTimeLast));
 }
 
 CBlockFileInfo *GetBlockFileInfo(size_t n) {
     return &vinfoBlockFile.at(n);
 }
 
 ThresholdState VersionBitsTipState(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos) {
     LOCK(cs_main);
     return VersionBitsState(chainActive.Tip(), params, pos, versionbitscache);
 }
 
 int VersionBitsTipStateSinceHeight(const Consensus::Params &params,
                                    Consensus::DeploymentPos pos) {
     LOCK(cs_main);
     return VersionBitsStateSinceHeight(chainActive.Tip(), params, pos,
                                        versionbitscache);
 }
 
 static const uint64_t MEMPOOL_DUMP_VERSION = 1;
 
 bool LoadMempool(const Config &config) {
     int64_t nExpiryTimeout =
         GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60;
     FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat", "rb");
     CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
     if (file.IsNull()) {
         LogPrintf(
             "Failed to open mempool file from disk. Continuing anyway.\n");
         return false;
     }
 
     int64_t count = 0;
     int64_t skipped = 0;
     int64_t failed = 0;
     int64_t nNow = GetTime();
 
     try {
         uint64_t version;
         file >> version;
         if (version != MEMPOOL_DUMP_VERSION) {
             return false;
         }
         uint64_t num;
         file >> num;
         double prioritydummy = 0;
         while (num--) {
             CTransactionRef tx;
             int64_t nTime;
             int64_t nFeeDelta;
             file >> tx;
             file >> nTime;
             file >> nFeeDelta;
 
             Amount amountdelta(nFeeDelta);
             if (amountdelta != Amount(0)) {
                 mempool.PrioritiseTransaction(tx->GetId(),
                                               tx->GetId().ToString(),
                                               prioritydummy, amountdelta);
             }
             CValidationState state;
             if (nTime + nExpiryTimeout > nNow) {
                 LOCK(cs_main);
                 AcceptToMemoryPoolWithTime(config, mempool, state, tx, true,
                                            nullptr, nTime);
                 if (state.IsValid()) {
                     ++count;
                 } else {
                     ++failed;
                 }
             } else {
                 ++skipped;
             }
             if (ShutdownRequested()) return false;
         }
         std::map<uint256, Amount> mapDeltas;
         file >> mapDeltas;
 
         for (const auto &i : mapDeltas) {
             mempool.PrioritiseTransaction(i.first, i.first.ToString(),
                                           prioritydummy, i.second);
         }
     } catch (const std::exception &e) {
         LogPrintf("Failed to deserialize mempool data on disk: %s. Continuing "
                   "anyway.\n",
                   e.what());
         return false;
     }
 
     LogPrintf("Imported mempool transactions from disk: %i successes, %i "
               "failed, %i expired\n",
               count, failed, skipped);
     return true;
 }
 
 void DumpMempool(void) {
     int64_t start = GetTimeMicros();
 
     std::map<uint256, Amount> mapDeltas;
     std::vector<TxMempoolInfo> vinfo;
 
     {
         LOCK(mempool.cs);
         for (const auto &i : mempool.mapDeltas) {
             mapDeltas[i.first] = i.second.second;
         }
         vinfo = mempool.infoAll();
     }
 
     int64_t mid = GetTimeMicros();
 
     try {
         FILE *filestr = fsbridge::fopen(GetDataDir() / "mempool.dat.new", "wb");
         if (!filestr) {
             return;
         }
 
         CAutoFile file(filestr, SER_DISK, CLIENT_VERSION);
 
         uint64_t version = MEMPOOL_DUMP_VERSION;
         file << version;
 
         file << (uint64_t)vinfo.size();
         for (const auto &i : vinfo) {
             file << *(i.tx);
             file << (int64_t)i.nTime;
             file << (int64_t)i.nFeeDelta.GetSatoshis();
             mapDeltas.erase(i.tx->GetId());
         }
 
         file << mapDeltas;
         FileCommit(file.Get());
         file.fclose();
         RenameOver(GetDataDir() / "mempool.dat.new",
                    GetDataDir() / "mempool.dat");
         int64_t last = GetTimeMicros();
         LogPrintf("Dumped mempool: %gs to copy, %gs to dump\n",
                   (mid - start) * 0.000001, (last - mid) * 0.000001);
     } catch (const std::exception &e) {
         LogPrintf("Failed to dump mempool: %s. Continuing anyway.\n", e.what());
     }
 }
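
 // Descriptive summary of the mempool.dat layout written above and read back
 // by LoadMempool(): uint64 MEMPOOL_DUMP_VERSION, uint64 entry count, then per
 // entry a serialized CTransaction, int64 nTime and int64 nFeeDelta, followed
 // by the remaining std::map<uint256, Amount> of fee deltas.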
 
 //! Guess how far we are in the verification process at the given block index
 double GuessVerificationProgress(const ChainTxData &data, CBlockIndex *pindex) {
     if (pindex == nullptr) return 0.0;
 
     int64_t nNow = time(nullptr);
 
     double fTxTotal;
 
     if (pindex->nChainTx <= data.nTxCount) {
         fTxTotal = data.nTxCount + (nNow - data.nTime) * data.dTxRate;
     } else {
         fTxTotal =
             pindex->nChainTx + (nNow - pindex->GetBlockTime()) * data.dTxRate;
     }
 
     return pindex->nChainTx / fTxTotal;
 }
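
 // Worked example with assumed figures (not taken from any chainparams): if
 // the checkpointed ChainTxData has nTxCount = 200e6 at time T0 with
 // dTxRate = 3.5 tx/s, and the tip is still behind that checkpoint with
 // nChainTx = 150e6 one day (86400 s) after T0, then
 //   fTxTotal = 200e6 + 86400 * 3.5 ~ 200.3e6
 // and the reported progress is 150e6 / 200.3e6 ~ 0.749.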
 
 class CMainCleanup {
 public:
     CMainCleanup() {}
     ~CMainCleanup() {
         // block headers
         BlockMap::iterator it1 = mapBlockIndex.begin();
         for (; it1 != mapBlockIndex.end(); it1++)
             delete (*it1).second;
         mapBlockIndex.clear();
     }
 } instance_of_cmaincleanup;
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index d2badb10b..c7f548a8c 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -1,71 +1,71 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include "validationinterface.h"
 
 static CMainSignals g_signals;
 
 CMainSignals &GetMainSignals() {
     return g_signals;
 }
 
 void RegisterValidationInterface(CValidationInterface *pwalletIn) {
     g_signals.UpdatedBlockTip.connect(boost::bind(
         &CValidationInterface::UpdatedBlockTip, pwalletIn, _1, _2, _3));
-    g_signals.SyncTransaction.connect(boost::bind(
-        &CValidationInterface::SyncTransaction, pwalletIn, _1, _2, _3));
-    g_signals.UpdatedTransaction.connect(
-        boost::bind(&CValidationInterface::UpdatedTransaction, pwalletIn, _1));
+    g_signals.TransactionAddedToMempool.connect(boost::bind(
+        &CValidationInterface::TransactionAddedToMempool, pwalletIn, _1));
+    g_signals.BlockConnected.connect(boost::bind(
+        &CValidationInterface::BlockConnected, pwalletIn, _1, _2, _3));
+    g_signals.BlockDisconnected.connect(
+        boost::bind(&CValidationInterface::BlockDisconnected, pwalletIn, _1));
     g_signals.SetBestChain.connect(
         boost::bind(&CValidationInterface::SetBestChain, pwalletIn, _1));
     g_signals.Inventory.connect(
         boost::bind(&CValidationInterface::Inventory, pwalletIn, _1));
     g_signals.Broadcast.connect(boost::bind(
         &CValidationInterface::ResendWalletTransactions, pwalletIn, _1, _2));
     g_signals.BlockChecked.connect(
         boost::bind(&CValidationInterface::BlockChecked, pwalletIn, _1, _2));
     g_signals.ScriptForMining.connect(
         boost::bind(&CValidationInterface::GetScriptForMining, pwalletIn, _1));
-    g_signals.BlockFound.connect(
-        boost::bind(&CValidationInterface::ResetRequestCount, pwalletIn, _1));
     g_signals.NewPoWValidBlock.connect(boost::bind(
         &CValidationInterface::NewPoWValidBlock, pwalletIn, _1, _2));
 }
 
 void UnregisterValidationInterface(CValidationInterface *pwalletIn) {
-    g_signals.BlockFound.disconnect(
-        boost::bind(&CValidationInterface::ResetRequestCount, pwalletIn, _1));
     g_signals.ScriptForMining.disconnect(
         boost::bind(&CValidationInterface::GetScriptForMining, pwalletIn, _1));
     g_signals.BlockChecked.disconnect(
         boost::bind(&CValidationInterface::BlockChecked, pwalletIn, _1, _2));
     g_signals.Broadcast.disconnect(boost::bind(
         &CValidationInterface::ResendWalletTransactions, pwalletIn, _1, _2));
     g_signals.Inventory.disconnect(
         boost::bind(&CValidationInterface::Inventory, pwalletIn, _1));
     g_signals.SetBestChain.disconnect(
         boost::bind(&CValidationInterface::SetBestChain, pwalletIn, _1));
-    g_signals.UpdatedTransaction.disconnect(
-        boost::bind(&CValidationInterface::UpdatedTransaction, pwalletIn, _1));
-    g_signals.SyncTransaction.disconnect(boost::bind(
-        &CValidationInterface::SyncTransaction, pwalletIn, _1, _2, _3));
+    g_signals.TransactionAddedToMempool.disconnect(boost::bind(
+        &CValidationInterface::TransactionAddedToMempool, pwalletIn, _1));
+    g_signals.BlockConnected.disconnect(boost::bind(
+        &CValidationInterface::BlockConnected, pwalletIn, _1, _2, _3));
+    g_signals.BlockDisconnected.disconnect(
+        boost::bind(&CValidationInterface::BlockDisconnected, pwalletIn, _1));
     g_signals.UpdatedBlockTip.disconnect(boost::bind(
         &CValidationInterface::UpdatedBlockTip, pwalletIn, _1, _2, _3));
     g_signals.NewPoWValidBlock.disconnect(boost::bind(
         &CValidationInterface::NewPoWValidBlock, pwalletIn, _1, _2));
 }
 
 void UnregisterAllValidationInterfaces() {
-    g_signals.BlockFound.disconnect_all_slots();
     g_signals.ScriptForMining.disconnect_all_slots();
     g_signals.BlockChecked.disconnect_all_slots();
     g_signals.Broadcast.disconnect_all_slots();
     g_signals.Inventory.disconnect_all_slots();
     g_signals.SetBestChain.disconnect_all_slots();
-    g_signals.UpdatedTransaction.disconnect_all_slots();
-    g_signals.SyncTransaction.disconnect_all_slots();
+    g_signals.TransactionAddedToMempool.disconnect_all_slots();
+    g_signals.BlockConnected.disconnect_all_slots();
+    g_signals.BlockDisconnected.disconnect_all_slots();
     g_signals.UpdatedBlockTip.disconnect_all_slots();
     g_signals.NewPoWValidBlock.disconnect_all_slots();
 }
diff --git a/src/validationinterface.h b/src/validationinterface.h
index 277aea627..dea1ae69e 100644
--- a/src/validationinterface.h
+++ b/src/validationinterface.h
@@ -1,109 +1,103 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_VALIDATIONINTERFACE_H
 #define BITCOIN_VALIDATIONINTERFACE_H
 
-#include <memory>
+#include "primitives/transaction.h" // CTransaction(Ref)
 
 #include <boost/signals2/signal.hpp>
 
+#include <memory>
+
 class CBlock;
 class CBlockIndex;
 struct CBlockLocator;
 class CBlockIndex;
 class CConnman;
 class CReserveScript;
-class CTransaction;
 class CValidationInterface;
 class CValidationState;
 class uint256;
 
 // These functions dispatch to one or all registered wallets
 
 /** Register a wallet to receive updates from core */
 void RegisterValidationInterface(CValidationInterface *pwalletIn);
 /** Unregister a wallet from core */
 void UnregisterValidationInterface(CValidationInterface *pwalletIn);
 /** Unregister all wallets from core */
 void UnregisterAllValidationInterfaces();
 
 class CValidationInterface {
 protected:
     virtual void UpdatedBlockTip(const CBlockIndex *pindexNew,
                                  const CBlockIndex *pindexFork,
                                  bool fInitialDownload) {}
-    virtual void SyncTransaction(const CTransaction &tx,
-                                 const CBlockIndex *pindex, int posInBlock) {}
+    virtual void TransactionAddedToMempool(const CTransactionRef &ptxn) {}
+    virtual void
+    BlockConnected(const std::shared_ptr<const CBlock> &block,
+                   const CBlockIndex *pindex,
+                   const std::vector<CTransactionRef> &txnConflicted) {}
+    virtual void BlockDisconnected(const std::shared_ptr<const CBlock> &block) {
+    }
     virtual void SetBestChain(const CBlockLocator &locator) {}
-    virtual void UpdatedTransaction(const uint256 &hash) {}
     virtual void Inventory(const uint256 &hash) {}
     virtual void ResendWalletTransactions(int64_t nBestBlockTime,
                                           CConnman *connman) {}
     virtual void BlockChecked(const CBlock &, const CValidationState &) {}
     virtual void GetScriptForMining(std::shared_ptr<CReserveScript> &){};
-    virtual void ResetRequestCount(const uint256 &hash){};
     virtual void NewPoWValidBlock(const CBlockIndex *pindex,
                                   const std::shared_ptr<const CBlock> &block){};
     friend void ::RegisterValidationInterface(CValidationInterface *);
     friend void ::UnregisterValidationInterface(CValidationInterface *);
     friend void ::UnregisterAllValidationInterfaces();
 };
 
 struct CMainSignals {
     /** Notifies listeners of updated block chain tip */
     boost::signals2::signal<void(const CBlockIndex *, const CBlockIndex *,
                                  bool fInitialDownload)>
         UpdatedBlockTip;
+    /** Notifies listeners of a transaction having been added to mempool. */
+    boost::signals2::signal<void(const CTransactionRef &)>
+        TransactionAddedToMempool;
     /**
-     * A posInBlock value for SyncTransaction calls for transactions not included
-     * in connected blocks such as transactions removed from mempool, accepted
-     * to mempool or appearing in disconnected blocks.
-     */
-    static const int SYNC_TRANSACTION_NOT_IN_BLOCK = -1;
-    /**
-     * Notifies listeners of updated transaction data (transaction, and
-     * optionally the block it is found in). Called with block data when
-     * transaction is included in a connected block, and without block data when
-     * transaction was accepted to mempool, removed from mempool (only when
-     * removal was due to conflict from connected block), or appeared in a
-     * disconnected block.
-     */
-    boost::signals2::signal<void(const CTransaction &,
-                                 const CBlockIndex *pindex, int posInBlock)>
-        SyncTransaction;
-    /**
-     * Notifies listeners of an updated transaction without new data (for now: a
-     * coinbase potentially becoming visible).
+     * Notifies listeners of a block being connected.
+     * Provides a vector of transactions evicted from the mempool as a result.
      */
-    boost::signals2::signal<void(const uint256 &)> UpdatedTransaction;
+    boost::signals2::signal<void(const std::shared_ptr<const CBlock> &,
+                                 const CBlockIndex *pindex,
+                                 const std::vector<CTransactionRef> &)>
+        BlockConnected;
+    /** Notifies listeners of a block being disconnected */
+    boost::signals2::signal<void(const std::shared_ptr<const CBlock> &)>
+        BlockDisconnected;
     /** Notifies listeners of a new active block chain. */
     boost::signals2::signal<void(const CBlockLocator &)> SetBestChain;
     /** Notifies listeners about an inventory item being seen on the network. */
     boost::signals2::signal<void(const uint256 &)> Inventory;
     /** Tells listeners to broadcast their data. */
     boost::signals2::signal<void(int64_t nBestBlockTime, CConnman *connman)>
         Broadcast;
     /** Notifies listeners of a block validation result */
     boost::signals2::signal<void(const CBlock &, const CValidationState &)>
         BlockChecked;
     /** Notifies listeners that a key for mining is required (coinbase) */
     boost::signals2::signal<void(std::shared_ptr<CReserveScript> &)>
         ScriptForMining;
-    /** Notifies listeners that a block has been successfully mined */
-    boost::signals2::signal<void(const uint256 &)> BlockFound;
     /**
      * Notifies listeners that a block which builds directly on our current tip
      * has been received and connected to the headers tree, though not validated
      * yet.
      */
     boost::signals2::signal<void(const CBlockIndex *,
                                  const std::shared_ptr<const CBlock> &)>
         NewPoWValidBlock;
 };
 
 CMainSignals &GetMainSignals();
 
 #endif // BITCOIN_VALIDATIONINTERFACE_H
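
 // Illustrative only (assumed example, not part of this patch): a minimal
 // consumer of the reworked notifications declared above. The class name and
 // log strings are hypothetical; the overridden signatures are taken from
 // CValidationInterface as it now stands in this header.
 //
 //   class CExampleListener final : public CValidationInterface {
 //   protected:
 //       void TransactionAddedToMempool(const CTransactionRef &ptxn) override {
 //           LogPrintf("listener: tx %s added to mempool\n",
 //                     ptxn->GetId().ToString());
 //       }
 //       void BlockConnected(
 //           const std::shared_ptr<const CBlock> &block,
 //           const CBlockIndex *pindex,
 //           const std::vector<CTransactionRef> &txnConflicted) override {
 //           LogPrintf("listener: connected %s, %d mempool conflicts\n",
 //                     block->GetHash().ToString(), (int)txnConflicted.size());
 //       }
 //       void BlockDisconnected(
 //           const std::shared_ptr<const CBlock> &block) override {
 //           LogPrintf("listener: disconnected %s\n",
 //                     block->GetHash().ToString());
 //       }
 //   };
 //
 // Such a listener would be attached with RegisterValidationInterface() and
 // detached with UnregisterValidationInterface(), just as the wallet is.
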
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 23a513502..2009b159d 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1,4579 +1,4635 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include "wallet/wallet.h"
 
 #include "chain.h"
 #include "checkpoints.h"
 #include "config.h"
 #include "consensus/consensus.h"
 #include "consensus/validation.h"
 #include "dstencode.h"
 #include "fs.h"
 #include "key.h"
 #include "keystore.h"
 #include "net.h"
 #include "policy/policy.h"
 #include "primitives/block.h"
 #include "primitives/transaction.h"
 #include "scheduler.h"
 #include "script/script.h"
 #include "script/sighashtype.h"
 #include "script/sign.h"
 #include "timedata.h"
 #include "txmempool.h"
 #include "ui_interface.h"
 #include "util.h"
 #include "utilmoneystr.h"
 #include "validation.h"
 #include "wallet/coincontrol.h"
 #include "wallet/finaltx.h"
 
 #include <boost/algorithm/string/replace.hpp>
 #include <boost/thread.hpp>
 
 #include <cassert>
 
 CWallet *pwalletMain = nullptr;
 
 /** Transaction fee set by the user */
 CFeeRate payTxFee(DEFAULT_TRANSACTION_FEE);
 unsigned int nTxConfirmTarget = DEFAULT_TX_CONFIRM_TARGET;
 bool bSpendZeroConfChange = DEFAULT_SPEND_ZEROCONF_CHANGE;
 bool fSendFreeTransactions = DEFAULT_SEND_FREE_TRANSACTIONS;
 
 const char *DEFAULT_WALLET_DAT = "wallet.dat";
 const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000;
 
 /**
  * Fees smaller than this (in satoshi) are considered zero fee (for transaction
  * creation)
  * Override with -mintxfee
  */
 CFeeRate CWallet::minTxFee = CFeeRate(DEFAULT_TRANSACTION_MINFEE);
 
 /**
  * If fee estimation does not have enough data to provide estimates, use this
  * fee instead. Has no effect if not using fee estimation.
  * Override with -fallbackfee
  */
 CFeeRate CWallet::fallbackFee = CFeeRate(DEFAULT_FALLBACK_FEE);
 
 const uint256 CMerkleTx::ABANDON_HASH(uint256S(
     "0000000000000000000000000000000000000000000000000000000000000001"));
 
 /** @defgroup mapWallet
  *
  * @{
  */
 
 struct CompareValueOnly {
     bool operator()(
         const std::pair<Amount, std::pair<const CWalletTx *, unsigned int>> &t1,
         const std::pair<Amount, std::pair<const CWalletTx *, unsigned int>> &t2)
         const {
         return t1.first < t2.first;
     }
 };
 
 std::string COutput::ToString() const {
     return strprintf("COutput(%s, %d, %d) [%s]", tx->GetId().ToString(), i,
                      nDepth, FormatMoney(tx->tx->vout[i].nValue));
 }
 
 const CWalletTx *CWallet::GetWalletTx(const uint256 &hash) const {
     LOCK(cs_wallet);
     std::map<uint256, CWalletTx>::const_iterator it = mapWallet.find(hash);
     if (it == mapWallet.end()) {
         return nullptr;
     }
 
     return &(it->second);
 }
 
 CPubKey CWallet::GenerateNewKey(bool internal) {
     // mapKeyMetadata
     AssertLockHeld(cs_wallet);
     // default to compressed public keys if we want 0.6.0 wallets
     bool fCompressed = CanSupportFeature(FEATURE_COMPRPUBKEY);
 
     CKey secret;
 
     // Create new metadata
     int64_t nCreationTime = GetTime();
     CKeyMetadata metadata(nCreationTime);
 
     // use HD key derivation if HD was enabled during wallet creation
     if (IsHDEnabled()) {
         DeriveNewChildKey(
             metadata, secret,
             (CanSupportFeature(FEATURE_HD_SPLIT) ? internal : false));
     } else {
         secret.MakeNewKey(fCompressed);
     }
 
     // Compressed public keys were introduced in version 0.6.0
     if (fCompressed) {
         SetMinVersion(FEATURE_COMPRPUBKEY);
     }
 
     CPubKey pubkey = secret.GetPubKey();
     assert(secret.VerifyPubKey(pubkey));
 
     mapKeyMetadata[pubkey.GetID()] = metadata;
     UpdateTimeFirstKey(nCreationTime);
 
     if (!AddKeyPubKey(secret, pubkey)) {
         throw std::runtime_error(std::string(__func__) + ": AddKey failed");
     }
 
     return pubkey;
 }
 
 void CWallet::DeriveNewChildKey(CKeyMetadata &metadata, CKey &secret,
                                 bool internal) {
     // for now we use a fixed keypath scheme of m/0'/0'/k
     // master key seed (256bit)
     CKey key;
     // hd master key
     CExtKey masterKey;
     // key at m/0'
     CExtKey accountKey;
     // key at m/0'/0' (external) or m/0'/1' (internal)
     CExtKey chainChildKey;
     // key at m/0'/0'/<n>'
     CExtKey childKey;
 
     // try to get the master key
     if (!GetKey(hdChain.masterKeyID, key)) {
         throw std::runtime_error(std::string(__func__) +
                                  ": Master key not found");
     }
 
     masterKey.SetMaster(key.begin(), key.size());
 
     // derive m/0'
     // use hardened derivation (child keys >= 0x80000000 are hardened after
     // bip32)
     masterKey.Derive(accountKey, BIP32_HARDENED_KEY_LIMIT);
 
     // derive m/0'/0' (external chain) OR m/0'/1' (internal chain)
     assert(internal ? CanSupportFeature(FEATURE_HD_SPLIT) : true);
     accountKey.Derive(chainChildKey,
                       BIP32_HARDENED_KEY_LIMIT + (internal ? 1 : 0));
 
     // derive child key at next index, skip keys already known to the wallet
     do {
         // always derive hardened keys
         // childIndex | BIP32_HARDENED_KEY_LIMIT = derive childIndex in hardened
         // child-index-range
         // example: 1 | BIP32_HARDENED_KEY_LIMIT == 0x80000001 == 2147483649
         if (internal) {
             chainChildKey.Derive(childKey,
                                  hdChain.nInternalChainCounter |
                                      BIP32_HARDENED_KEY_LIMIT);
             metadata.hdKeypath = "m/0'/1'/" +
                                  std::to_string(hdChain.nInternalChainCounter) +
                                  "'";
             hdChain.nInternalChainCounter++;
         } else {
             chainChildKey.Derive(childKey,
                                  hdChain.nExternalChainCounter |
                                      BIP32_HARDENED_KEY_LIMIT);
             metadata.hdKeypath = "m/0'/0'/" +
                                  std::to_string(hdChain.nExternalChainCounter) +
                                  "'";
             hdChain.nExternalChainCounter++;
         }
     } while (HaveKey(childKey.key.GetPubKey().GetID()));
     secret = childKey.key;
     metadata.hdMasterKeyID = hdChain.masterKeyID;
     // update the chain model in the database
     if (!CWalletDB(strWalletFile).WriteHDChain(hdChain)) {
         throw std::runtime_error(std::string(__func__) +
                                  ": Writing HD chain model failed");
     }
 }
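
 // Descriptive note: with the fixed scheme above, external receiving keys end
 // up at m/0'/0'/i' and, when FEATURE_HD_SPLIT is supported, internal change
 // keys at m/0'/1'/i', with i taken from the corresponding hardened chain
 // counter persisted in the wallet's CHDChain record.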
 
 bool CWallet::AddKeyPubKey(const CKey &secret, const CPubKey &pubkey) {
     // mapKeyMetadata
     AssertLockHeld(cs_wallet);
     if (!CCryptoKeyStore::AddKeyPubKey(secret, pubkey)) {
         return false;
     }
 
     // Check if we need to remove from watch-only.
     CScript script;
     script = GetScriptForDestination(pubkey.GetID());
     if (HaveWatchOnly(script)) {
         RemoveWatchOnly(script);
     }
 
     script = GetScriptForRawPubKey(pubkey);
     if (HaveWatchOnly(script)) {
         RemoveWatchOnly(script);
     }
 
     if (!fFileBacked) {
         return true;
     }
 
     if (IsCrypted()) {
         return true;
     }
 
     return CWalletDB(strWalletFile)
         .WriteKey(pubkey, secret.GetPrivKey(), mapKeyMetadata[pubkey.GetID()]);
 }
 
 bool CWallet::AddCryptedKey(const CPubKey &vchPubKey,
                             const std::vector<uint8_t> &vchCryptedSecret) {
     if (!CCryptoKeyStore::AddCryptedKey(vchPubKey, vchCryptedSecret)) {
         return false;
     }
 
     if (!fFileBacked) {
         return true;
     }
 
     LOCK(cs_wallet);
     if (pwalletdbEncryption) {
         return pwalletdbEncryption->WriteCryptedKey(
             vchPubKey, vchCryptedSecret, mapKeyMetadata[vchPubKey.GetID()]);
     }
 
     return CWalletDB(strWalletFile)
         .WriteCryptedKey(vchPubKey, vchCryptedSecret,
                          mapKeyMetadata[vchPubKey.GetID()]);
 }
 
 bool CWallet::LoadKeyMetadata(const CTxDestination &keyID,
                               const CKeyMetadata &meta) {
     // mapKeyMetadata
     AssertLockHeld(cs_wallet);
     UpdateTimeFirstKey(meta.nCreateTime);
     mapKeyMetadata[keyID] = meta;
     return true;
 }
 
 bool CWallet::LoadCryptedKey(const CPubKey &vchPubKey,
                              const std::vector<uint8_t> &vchCryptedSecret) {
     return CCryptoKeyStore::AddCryptedKey(vchPubKey, vchCryptedSecret);
 }
 
 void CWallet::UpdateTimeFirstKey(int64_t nCreateTime) {
     AssertLockHeld(cs_wallet);
     if (nCreateTime <= 1) {
         // Cannot determine birthday information, so set the wallet birthday to
         // the beginning of time.
         nTimeFirstKey = 1;
     } else if (!nTimeFirstKey || nCreateTime < nTimeFirstKey) {
         nTimeFirstKey = nCreateTime;
     }
 }
 
 bool CWallet::AddCScript(const CScript &redeemScript) {
     if (!CCryptoKeyStore::AddCScript(redeemScript)) {
         return false;
     }
 
     if (!fFileBacked) {
         return true;
     }
 
     return CWalletDB(strWalletFile)
         .WriteCScript(Hash160(redeemScript), redeemScript);
 }
 
 bool CWallet::LoadCScript(const CScript &redeemScript) {
     /**
      * A sanity check was added in pull #3843 to avoid adding redeemScripts
      * that can never be redeemed. However, old wallets may still contain
      * these. Do not add them to the wallet; warn instead.
      */
     if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE) {
         std::string strAddr = EncodeDestination(CScriptID(redeemScript));
         LogPrintf("%s: Warning: This wallet contains a redeemScript of size %i "
                   "which exceeds maximum size %i thus can never be redeemed. "
                   "Do not use address %s.\n",
                   __func__, redeemScript.size(), MAX_SCRIPT_ELEMENT_SIZE,
                   strAddr);
         return true;
     }
 
     return CCryptoKeyStore::AddCScript(redeemScript);
 }
 
 bool CWallet::AddWatchOnly(const CScript &dest) {
     if (!CCryptoKeyStore::AddWatchOnly(dest)) {
         return false;
     }
 
     const CKeyMetadata &meta = mapKeyMetadata[CScriptID(dest)];
     UpdateTimeFirstKey(meta.nCreateTime);
     NotifyWatchonlyChanged(true);
 
     if (!fFileBacked) {
         return true;
     }
 
     return CWalletDB(strWalletFile).WriteWatchOnly(dest, meta);
 }
 
 bool CWallet::AddWatchOnly(const CScript &dest, int64_t nCreateTime) {
     mapKeyMetadata[CScriptID(dest)].nCreateTime = nCreateTime;
     return AddWatchOnly(dest);
 }
 
 bool CWallet::RemoveWatchOnly(const CScript &dest) {
     AssertLockHeld(cs_wallet);
     if (!CCryptoKeyStore::RemoveWatchOnly(dest)) {
         return false;
     }
 
     if (!HaveWatchOnly()) {
         NotifyWatchonlyChanged(false);
     }
 
     if (fFileBacked && !CWalletDB(strWalletFile).EraseWatchOnly(dest)) {
         return false;
     }
 
     return true;
 }
 
 bool CWallet::LoadWatchOnly(const CScript &dest) {
     return CCryptoKeyStore::AddWatchOnly(dest);
 }
 
 bool CWallet::Unlock(const SecureString &strWalletPassphrase) {
     CCrypter crypter;
     CKeyingMaterial vMasterKey;
 
     LOCK(cs_wallet);
     for (const MasterKeyMap::value_type &pMasterKey : mapMasterKeys) {
         if (!crypter.SetKeyFromPassphrase(
                 strWalletPassphrase, pMasterKey.second.vchSalt,
                 pMasterKey.second.nDeriveIterations,
                 pMasterKey.second.nDerivationMethod)) {
             return false;
         }
 
         if (!crypter.Decrypt(pMasterKey.second.vchCryptedKey, vMasterKey)) {
             // try another master key
             continue;
         }
 
         if (CCryptoKeyStore::Unlock(vMasterKey)) {
             return true;
         }
     }
 
     return false;
 }
 
 bool CWallet::ChangeWalletPassphrase(
     const SecureString &strOldWalletPassphrase,
     const SecureString &strNewWalletPassphrase) {
     bool fWasLocked = IsLocked();
 
     LOCK(cs_wallet);
     Lock();
 
     CCrypter crypter;
     CKeyingMaterial vMasterKey;
     for (MasterKeyMap::value_type &pMasterKey : mapMasterKeys) {
         if (!crypter.SetKeyFromPassphrase(
                 strOldWalletPassphrase, pMasterKey.second.vchSalt,
                 pMasterKey.second.nDeriveIterations,
                 pMasterKey.second.nDerivationMethod)) {
             return false;
         }
 
         if (!crypter.Decrypt(pMasterKey.second.vchCryptedKey, vMasterKey)) {
             return false;
         }
 
         if (CCryptoKeyStore::Unlock(vMasterKey)) {
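             // Recalibrate nDeriveIterations for the new passphrase: time a
             // derivation, scale it toward a ~100ms target, average with a
             // second timed run, and floor the result at 25000.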
             int64_t nStartTime = GetTimeMillis();
             crypter.SetKeyFromPassphrase(strNewWalletPassphrase,
                                          pMasterKey.second.vchSalt,
                                          pMasterKey.second.nDeriveIterations,
                                          pMasterKey.second.nDerivationMethod);
             pMasterKey.second.nDeriveIterations =
                 pMasterKey.second.nDeriveIterations *
                 (100 / ((double)(GetTimeMillis() - nStartTime)));
 
             nStartTime = GetTimeMillis();
             crypter.SetKeyFromPassphrase(strNewWalletPassphrase,
                                          pMasterKey.second.vchSalt,
                                          pMasterKey.second.nDeriveIterations,
                                          pMasterKey.second.nDerivationMethod);
             pMasterKey.second.nDeriveIterations =
                 (pMasterKey.second.nDeriveIterations +
                  pMasterKey.second.nDeriveIterations * 100 /
                      double(GetTimeMillis() - nStartTime)) /
                 2;
 
             if (pMasterKey.second.nDeriveIterations < 25000) {
                 pMasterKey.second.nDeriveIterations = 25000;
             }
 
             LogPrintf(
                 "Wallet passphrase changed to an nDeriveIterations of %i\n",
                 pMasterKey.second.nDeriveIterations);
 
             if (!crypter.SetKeyFromPassphrase(
                     strNewWalletPassphrase, pMasterKey.second.vchSalt,
                     pMasterKey.second.nDeriveIterations,
                     pMasterKey.second.nDerivationMethod)) {
                 return false;
             }
 
             if (!crypter.Encrypt(vMasterKey, pMasterKey.second.vchCryptedKey)) {
                 return false;
             }
 
             CWalletDB(strWalletFile)
                 .WriteMasterKey(pMasterKey.first, pMasterKey.second);
             if (fWasLocked) {
                 Lock();
             }
 
             return true;
         }
     }
 
     return false;
 }
 
 void CWallet::SetBestChain(const CBlockLocator &loc) {
     CWalletDB walletdb(strWalletFile);
     walletdb.WriteBestBlock(loc);
 }
 
 bool CWallet::SetMinVersion(enum WalletFeature nVersion, CWalletDB *pwalletdbIn,
                             bool fExplicit) {
     // nWalletVersion
     LOCK(cs_wallet);
     if (nWalletVersion >= nVersion) {
         return true;
     }
 
     // When doing an explicit upgrade, if we pass the max version permitted,
     // upgrade all the way.
     if (fExplicit && nVersion > nWalletMaxVersion) {
         nVersion = FEATURE_LATEST;
     }
 
     nWalletVersion = nVersion;
 
     if (nVersion > nWalletMaxVersion) {
         nWalletMaxVersion = nVersion;
     }
 
     if (fFileBacked) {
         CWalletDB *pwalletdb =
             pwalletdbIn ? pwalletdbIn : new CWalletDB(strWalletFile);
         if (nWalletVersion > 40000) {
             pwalletdb->WriteMinVersion(nWalletVersion);
         }
 
         if (!pwalletdbIn) {
             delete pwalletdb;
         }
     }
 
     return true;
 }
 
 bool CWallet::SetMaxVersion(int nVersion) {
     // nWalletVersion, nWalletMaxVersion
     LOCK(cs_wallet);
 
     // Cannot downgrade below current version
     if (nWalletVersion > nVersion) {
         return false;
     }
 
     nWalletMaxVersion = nVersion;
 
     return true;
 }
 
 std::set<uint256> CWallet::GetConflicts(const uint256 &txid) const {
     std::set<uint256> result;
     AssertLockHeld(cs_wallet);
 
     std::map<uint256, CWalletTx>::const_iterator it = mapWallet.find(txid);
     if (it == mapWallet.end()) {
         return result;
     }
 
     const CWalletTx &wtx = it->second;
 
     std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range;
 
     for (const CTxIn &txin : wtx.tx->vin) {
         if (mapTxSpends.count(txin.prevout) <= 1) {
             // No conflict if zero or one spends.
             continue;
         }
 
         range = mapTxSpends.equal_range(txin.prevout);
         for (TxSpends::const_iterator _it = range.first; _it != range.second;
              ++_it) {
             result.insert(_it->second);
         }
     }
 
     return result;
 }
 
 bool CWallet::HasWalletSpend(const uint256 &txid) const {
     AssertLockHeld(cs_wallet);
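     // mapTxSpends is keyed by COutPoint; any spend of an output of txid
     // sorts at or after COutPoint(txid, 0), so a lower_bound hit with a
     // matching hash means some output of txid is spent by the wallet.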
     auto iter = mapTxSpends.lower_bound(COutPoint(txid, 0));
     return (iter != mapTxSpends.end() && iter->first.hash == txid);
 }
 
 void CWallet::Flush(bool shutdown) {
     bitdb.Flush(shutdown);
 }
 
 bool CWallet::Verify() {
     if (GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) {
         return true;
     }
 
     uiInterface.InitMessage(_("Verifying wallet..."));
     std::string walletFile = GetArg("-wallet", DEFAULT_WALLET_DAT);
 
     std::string strError;
     if (!CWalletDB::VerifyEnvironment(walletFile, GetDataDir().string(),
                                       strError)) {
         return InitError(strError);
     }
 
     if (GetBoolArg("-salvagewallet", false)) {
         // Recover readable keypairs:
         CWallet dummyWallet;
         if (!CWalletDB::Recover(walletFile, (void *)&dummyWallet,
                                 CWalletDB::RecoverKeysOnlyFilter)) {
             return false;
         }
     }
 
     std::string strWarning;
     bool dbV = CWalletDB::VerifyDatabaseFile(walletFile, GetDataDir().string(),
                                              strWarning, strError);
     if (!strWarning.empty()) {
         InitWarning(strWarning);
     }
     if (!dbV) {
         InitError(strError);
         return false;
     }
     return true;
 }
 
 void CWallet::SyncMetaData(
     std::pair<TxSpends::iterator, TxSpends::iterator> range) {
     // We want all the wallet transactions in range to have the same metadata as
     // the oldest (smallest nOrderPos).
     // So: find smallest nOrderPos:
 
     int nMinOrderPos = std::numeric_limits<int>::max();
     const CWalletTx *copyFrom = nullptr;
     for (TxSpends::iterator it = range.first; it != range.second; ++it) {
         const uint256 &hash = it->second;
         int n = mapWallet[hash].nOrderPos;
         if (n < nMinOrderPos) {
             nMinOrderPos = n;
             copyFrom = &mapWallet[hash];
         }
     }
 
     // Now copy data from copyFrom to rest:
     for (TxSpends::iterator it = range.first; it != range.second; ++it) {
         const uint256 &hash = it->second;
         CWalletTx *copyTo = &mapWallet[hash];
         if (copyFrom == copyTo) {
             continue;
         }
 
         if (!copyFrom->IsEquivalentTo(*copyTo)) {
             continue;
         }
 
         copyTo->mapValue = copyFrom->mapValue;
         copyTo->vOrderForm = copyFrom->vOrderForm;
         // fTimeReceivedIsTxTime not copied on purpose.
         // nTimeReceived not copied on purpose.
         copyTo->nTimeSmart = copyFrom->nTimeSmart;
         copyTo->fFromMe = copyFrom->fFromMe;
         copyTo->strFromAccount = copyFrom->strFromAccount;
         // nOrderPos not copied on purpose.
         // Cached members not copied on purpose.
     }
 }
 
 /**
  * Outpoint is spent if any non-conflicted transaction spends it.
  */
 bool CWallet::IsSpent(const uint256 &hash, unsigned int n) const {
     const COutPoint outpoint(hash, n);
     std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range;
     range = mapTxSpends.equal_range(outpoint);
 
     for (TxSpends::const_iterator it = range.first; it != range.second; ++it) {
         const uint256 &wtxid = it->second;
         std::map<uint256, CWalletTx>::const_iterator mit =
             mapWallet.find(wtxid);
         if (mit != mapWallet.end()) {
             int depth = mit->second.GetDepthInMainChain();
             if (depth > 0 || (depth == 0 && !mit->second.isAbandoned())) {
                 // Spent
                 return true;
             }
         }
     }
 
     return false;
 }
 
 void CWallet::AddToSpends(const COutPoint &outpoint, const uint256 &wtxid) {
     mapTxSpends.insert(std::make_pair(outpoint, wtxid));
 
     std::pair<TxSpends::iterator, TxSpends::iterator> range;
     range = mapTxSpends.equal_range(outpoint);
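     // Keep metadata consistent across every wallet transaction that spends
     // this outpoint (SyncMetaData copies it from the oldest one).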
     SyncMetaData(range);
 }
 
 void CWallet::AddToSpends(const uint256 &wtxid) {
     assert(mapWallet.count(wtxid));
     CWalletTx &thisTx = mapWallet[wtxid];
     // Coinbases don't spend anything!
     if (thisTx.IsCoinBase()) {
         return;
     }
 
     for (const CTxIn &txin : thisTx.tx->vin) {
         AddToSpends(txin.prevout, wtxid);
     }
 }
 
 bool CWallet::EncryptWallet(const SecureString &strWalletPassphrase) {
     if (IsCrypted()) {
         return false;
     }
 
     CKeyingMaterial vMasterKey;
 
     vMasterKey.resize(WALLET_CRYPTO_KEY_SIZE);
     GetStrongRandBytes(&vMasterKey[0], WALLET_CRYPTO_KEY_SIZE);
 
     CMasterKey kMasterKey;
 
     kMasterKey.vchSalt.resize(WALLET_CRYPTO_SALT_SIZE);
     GetStrongRandBytes(&kMasterKey.vchSalt[0], WALLET_CRYPTO_SALT_SIZE);
 
     CCrypter crypter;
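     // Calibrate the key-derivation work factor: time 25000 rounds, scale so
     // a single derivation takes on the order of 100ms, average with a
     // second timed run, and never go below 25000 iterations.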
     int64_t nStartTime = GetTimeMillis();
     crypter.SetKeyFromPassphrase(strWalletPassphrase, kMasterKey.vchSalt, 25000,
                                  kMasterKey.nDerivationMethod);
     kMasterKey.nDeriveIterations =
         2500000 / ((double)(GetTimeMillis() - nStartTime));
 
     nStartTime = GetTimeMillis();
     crypter.SetKeyFromPassphrase(strWalletPassphrase, kMasterKey.vchSalt,
                                  kMasterKey.nDeriveIterations,
                                  kMasterKey.nDerivationMethod);
     kMasterKey.nDeriveIterations =
         (kMasterKey.nDeriveIterations +
          kMasterKey.nDeriveIterations * 100 /
              ((double)(GetTimeMillis() - nStartTime))) /
         2;
 
     if (kMasterKey.nDeriveIterations < 25000) {
         kMasterKey.nDeriveIterations = 25000;
     }
 
     LogPrintf("Encrypting Wallet with an nDeriveIterations of %i\n",
               kMasterKey.nDeriveIterations);
 
     if (!crypter.SetKeyFromPassphrase(strWalletPassphrase, kMasterKey.vchSalt,
                                       kMasterKey.nDeriveIterations,
                                       kMasterKey.nDerivationMethod)) {
         return false;
     }
 
     if (!crypter.Encrypt(vMasterKey, kMasterKey.vchCryptedKey)) {
         return false;
     }
 
     {
         LOCK(cs_wallet);
         mapMasterKeys[++nMasterKeyMaxID] = kMasterKey;
         if (fFileBacked) {
             assert(!pwalletdbEncryption);
             pwalletdbEncryption = new CWalletDB(strWalletFile);
             if (!pwalletdbEncryption->TxnBegin()) {
                 delete pwalletdbEncryption;
                 pwalletdbEncryption = nullptr;
                 return false;
             }
 
             pwalletdbEncryption->WriteMasterKey(nMasterKeyMaxID, kMasterKey);
         }
 
         if (!EncryptKeys(vMasterKey)) {
             if (fFileBacked) {
                 pwalletdbEncryption->TxnAbort();
                 delete pwalletdbEncryption;
             }
 
             // We now probably have half of our keys encrypted in memory, and
             // half not... die and let the user reload the unencrypted wallet.
             assert(false);
         }
 
         // Encryption was introduced in version 0.4.0
         SetMinVersion(FEATURE_WALLETCRYPT, pwalletdbEncryption, true);
 
         if (fFileBacked) {
             if (!pwalletdbEncryption->TxnCommit()) {
                 delete pwalletdbEncryption;
                 // We now have keys encrypted in memory, but not on disk... die
                 // to avoid confusion and let the user reload the unencrypted
                 // wallet.
                 assert(false);
             }
 
             delete pwalletdbEncryption;
             pwalletdbEncryption = nullptr;
         }
 
         Lock();
         Unlock(strWalletPassphrase);
 
         // If we are using HD, replace the HD master key (seed) with a new one.
         if (IsHDEnabled()) {
             CKey key;
             CPubKey masterPubKey = GenerateNewHDMasterKey();
             // Preserve the old chain's version so we don't break backward
             // compatibility.
             CHDChain oldChain = GetHDChain();
             if (!SetHDMasterKey(masterPubKey, &oldChain)) {
                 return false;
             }
         }
 
         NewKeyPool();
         Lock();
 
         // Need to completely rewrite the wallet file; if we don't, bdb might
         // keep bits of the unencrypted private key in slack space in the
         // database file.
         CDB::Rewrite(strWalletFile);
     }
 
     NotifyStatusChanged(this);
     return true;
 }
 
 DBErrors CWallet::ReorderTransactions() {
     LOCK(cs_wallet);
     CWalletDB walletdb(strWalletFile);
 
     // Old wallets didn't have any defined order for transactions. Probably a
     // bad idea to change the output of this.
 
     // First: get all CWalletTx and CAccountingEntry into a sorted-by-time
     // multimap.
     typedef std::pair<CWalletTx *, CAccountingEntry *> TxPair;
     typedef std::multimap<int64_t, TxPair> TxItems;
     TxItems txByTime;
 
     for (std::map<uint256, CWalletTx>::iterator it = mapWallet.begin();
          it != mapWallet.end(); ++it) {
         CWalletTx *wtx = &((*it).second);
         txByTime.insert(
             std::make_pair(wtx->nTimeReceived, TxPair(wtx, nullptr)));
     }
 
     std::list<CAccountingEntry> acentries;
     walletdb.ListAccountCreditDebit("", acentries);
     for (CAccountingEntry &entry : acentries) {
         txByTime.insert(std::make_pair(entry.nTime, TxPair(nullptr, &entry)));
     }
 
     nOrderPosNext = 0;
     std::vector<int64_t> nOrderPosOffsets;
     for (TxItems::iterator it = txByTime.begin(); it != txByTime.end(); ++it) {
         CWalletTx *const pwtx = (*it).second.first;
         CAccountingEntry *const pacentry = (*it).second.second;
         int64_t &nOrderPos =
             (pwtx != 0) ? pwtx->nOrderPos : pacentry->nOrderPos;
 
         if (nOrderPos == -1) {
             nOrderPos = nOrderPosNext++;
             nOrderPosOffsets.push_back(nOrderPos);
 
             if (pwtx) {
                 if (!walletdb.WriteTx(*pwtx)) {
                     return DB_LOAD_FAIL;
                 }
             } else if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo,
                                                       *pacentry)) {
                 return DB_LOAD_FAIL;
             }
         } else {
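             // This entry already has a position; shift it past any of the
             // newly assigned positions that land at or below it so the
             // ordering stays consistent.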
             int64_t nOrderPosOff = 0;
             for (const int64_t &nOffsetStart : nOrderPosOffsets) {
                 if (nOrderPos >= nOffsetStart) {
                     ++nOrderPosOff;
                 }
             }
 
             nOrderPos += nOrderPosOff;
             nOrderPosNext = std::max(nOrderPosNext, nOrderPos + 1);
 
             if (!nOrderPosOff) {
                 continue;
             }
 
             // Since we're changing the order, write it back.
             if (pwtx) {
                 if (!walletdb.WriteTx(*pwtx)) {
                     return DB_LOAD_FAIL;
                 }
             } else if (!walletdb.WriteAccountingEntry(pacentry->nEntryNo,
                                                       *pacentry)) {
                 return DB_LOAD_FAIL;
             }
         }
     }
 
     walletdb.WriteOrderPosNext(nOrderPosNext);
 
     return DB_LOAD_OK;
 }
 
 int64_t CWallet::IncOrderPosNext(CWalletDB *pwalletdb) {
     // nOrderPosNext
     AssertLockHeld(cs_wallet);
     int64_t nRet = nOrderPosNext++;
     if (pwalletdb) {
         pwalletdb->WriteOrderPosNext(nOrderPosNext);
     } else {
         CWalletDB(strWalletFile).WriteOrderPosNext(nOrderPosNext);
     }
 
     return nRet;
 }
 
 bool CWallet::AccountMove(std::string strFrom, std::string strTo,
                           const Amount nAmount, std::string strComment) {
     CWalletDB walletdb(strWalletFile);
     if (!walletdb.TxnBegin()) {
         return false;
     }
 
     int64_t nNow = GetAdjustedTime();
 
     // Debit
     CAccountingEntry debit;
     debit.nOrderPos = IncOrderPosNext(&walletdb);
     debit.strAccount = strFrom;
     debit.nCreditDebit = -1 * nAmount;
     debit.nTime = nNow;
     debit.strOtherAccount = strTo;
     debit.strComment = strComment;
     AddAccountingEntry(debit, &walletdb);
 
     // Credit
     CAccountingEntry credit;
     credit.nOrderPos = IncOrderPosNext(&walletdb);
     credit.strAccount = strTo;
     credit.nCreditDebit = nAmount;
     credit.nTime = nNow;
     credit.strOtherAccount = strFrom;
     credit.strComment = strComment;
     AddAccountingEntry(credit, &walletdb);
 
     if (!walletdb.TxnCommit()) {
         return false;
     }
 
     return true;
 }
 
 bool CWallet::GetAccountPubkey(CPubKey &pubKey, std::string strAccount,
                                bool bForceNew) {
     CWalletDB walletdb(strWalletFile);
 
     CAccount account;
     walletdb.ReadAccount(strAccount, account);
 
     if (!bForceNew) {
         if (!account.vchPubKey.IsValid()) {
             bForceNew = true;
         } else {
             // Check if the current key has been used.
             CScript scriptPubKey =
                 GetScriptForDestination(account.vchPubKey.GetID());
             for (std::map<uint256, CWalletTx>::iterator it = mapWallet.begin();
                  it != mapWallet.end() && account.vchPubKey.IsValid(); ++it) {
                 for (const CTxOut &txout : (*it).second.tx->vout) {
                     if (txout.scriptPubKey == scriptPubKey) {
                         bForceNew = true;
                         break;
                     }
                 }
             }
         }
     }
 
     // Generate a new key
     if (bForceNew) {
         if (!GetKeyFromPool(account.vchPubKey, false)) {
             return false;
         }
 
         SetAddressBook(account.vchPubKey.GetID(), strAccount, "receive");
         walletdb.WriteAccount(strAccount, account);
     }
 
     pubKey = account.vchPubKey;
 
     return true;
 }
 
 void CWallet::MarkDirty() {
     LOCK(cs_wallet);
     for (std::pair<const uint256, CWalletTx> &item : mapWallet) {
         item.second.MarkDirty();
     }
 }
 
 bool CWallet::MarkReplaced(const uint256 &originalHash,
                            const uint256 &newHash) {
     LOCK(cs_wallet);
 
     auto mi = mapWallet.find(originalHash);
 
     // There is a bug if MarkReplaced is not called on an existing wallet
     // transaction.
     assert(mi != mapWallet.end());
 
     CWalletTx &wtx = (*mi).second;
 
     // Ensure for now that we're not overwriting data.
     assert(wtx.mapValue.count("replaced_by_txid") == 0);
 
     wtx.mapValue["replaced_by_txid"] = newHash.ToString();
 
     CWalletDB walletdb(strWalletFile, "r+");
 
     bool success = true;
     if (!walletdb.WriteTx(wtx)) {
         LogPrintf("%s: Updating walletdb tx %s failed", __func__,
                   wtx.GetId().ToString());
         success = false;
     }
 
     NotifyTransactionChanged(this, originalHash, CT_UPDATED);
 
     return success;
 }
 
 bool CWallet::AddToWallet(const CWalletTx &wtxIn, bool fFlushOnClose) {
     LOCK(cs_wallet);
 
     CWalletDB walletdb(strWalletFile, "r+", fFlushOnClose);
 
     uint256 hash = wtxIn.GetId();
 
     // Inserts only if not already there; returns the tx inserted or found.
     std::pair<std::map<uint256, CWalletTx>::iterator, bool> ret =
         mapWallet.insert(std::make_pair(hash, wtxIn));
     CWalletTx &wtx = (*ret.first).second;
     wtx.BindWallet(this);
     bool fInsertedNew = ret.second;
     if (fInsertedNew) {
         wtx.nTimeReceived = GetAdjustedTime();
         wtx.nOrderPos = IncOrderPosNext(&walletdb);
         wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr)));
         wtx.nTimeSmart = ComputeTimeSmart(wtx);
         AddToSpends(hash);
     }
 
     bool fUpdated = false;
     if (!fInsertedNew) {
         // Merge
         if (!wtxIn.hashUnset() && wtxIn.hashBlock != wtx.hashBlock) {
             wtx.hashBlock = wtxIn.hashBlock;
             fUpdated = true;
         }
 
         // If no longer abandoned, update
         if (wtxIn.hashBlock.IsNull() && wtx.isAbandoned()) {
             wtx.hashBlock = wtxIn.hashBlock;
             fUpdated = true;
         }
 
         if (wtxIn.nIndex != -1 && (wtxIn.nIndex != wtx.nIndex)) {
             wtx.nIndex = wtxIn.nIndex;
             fUpdated = true;
         }
 
         if (wtxIn.fFromMe && wtxIn.fFromMe != wtx.fFromMe) {
             wtx.fFromMe = wtxIn.fFromMe;
             fUpdated = true;
         }
     }
 
     // Debug print.
     LogPrintf("AddToWallet %s  %s%s\n", wtxIn.GetId().ToString(),
               (fInsertedNew ? "new" : ""), (fUpdated ? "update" : ""));
 
     // Write to disk
     if ((fInsertedNew || fUpdated) && !walletdb.WriteTx(wtx)) {
         return false;
     }
 
     // Break debit/credit balance caches:
     wtx.MarkDirty();
 
     // Notify UI of new or updated transaction.
     NotifyTransactionChanged(this, hash, fInsertedNew ? CT_NEW : CT_UPDATED);
 
     // Notify an external script when a wallet transaction comes in or is
     // updated.
     std::string strCmd = GetArg("-walletnotify", "");
 
     if (!strCmd.empty()) {
         boost::replace_all(strCmd, "%s", wtxIn.GetId().GetHex());
         // Thread runs free.
         boost::thread t(runCommand, strCmd);
     }
 
     return true;
 }
 
 bool CWallet::LoadToWallet(const CWalletTx &wtxIn) {
     uint256 txid = wtxIn.GetId();
 
     mapWallet[txid] = wtxIn;
     CWalletTx &wtx = mapWallet[txid];
     wtx.BindWallet(this);
     wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr)));
     AddToSpends(txid);
     for (const CTxIn &txin : wtx.tx->vin) {
         if (mapWallet.count(txin.prevout.hash)) {
             CWalletTx &prevtx = mapWallet[txin.prevout.hash];
             if (prevtx.nIndex == -1 && !prevtx.hashUnset()) {
                 MarkConflicted(prevtx.hashBlock, wtx.GetId());
             }
         }
     }
 
     return true;
 }
 
 /**
  * Add a transaction to the wallet, or update it. pIndex and posInBlock should
  * be set when the transaction was known to be included in a block. When
  * posInBlock = SYNC_TRANSACTION_NOT_IN_BLOCK (-1), then wallet state is not
  * updated in AddToWallet, but notifications happen and cached balances are
  * marked dirty. If fUpdate is true, existing transactions will be updated.
  *
  * TODO: One exception to this is that the abandoned state is cleared under the
  * assumption that any further notification of a transaction that was considered
  * abandoned is an indication that it is not safe to be considered abandoned.
  * Abandoned state should probably be more carefully tracked via different
  * posInBlock signals or by checking mempool presence when necessary.
  */
-bool CWallet::AddToWalletIfInvolvingMe(const CTransaction &tx,
+bool CWallet::AddToWalletIfInvolvingMe(const CTransactionRef &ptx,
                                        const CBlockIndex *pIndex,
                                        int posInBlock, bool fUpdate) {
+    const CTransaction &tx = *ptx;
     AssertLockHeld(cs_wallet);
 
     if (posInBlock != -1) {
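         // The transaction is confirmed in a block: any other wallet
         // transaction that spends the same inputs conflicts with it, so
         // mark those transactions as conflicted.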
         for (const CTxIn &txin : tx.vin) {
             std::pair<TxSpends::const_iterator, TxSpends::const_iterator>
                 range = mapTxSpends.equal_range(txin.prevout);
             while (range.first != range.second) {
                 if (range.first->second != tx.GetId()) {
                     LogPrintf("Transaction %s (in block %s) conflicts with "
                               "wallet transaction %s (both spend %s:%i)\n",
                               tx.GetId().ToString(),
                               pIndex->GetBlockHash().ToString(),
                               range.first->second.ToString(),
                               range.first->first.hash.ToString(),
                               range.first->first.n);
                     MarkConflicted(pIndex->GetBlockHash(), range.first->second);
                 }
                 range.first++;
             }
         }
     }
 
     bool fExisted = mapWallet.count(tx.GetId()) != 0;
     if (fExisted && !fUpdate) {
         return false;
     }
-
     if (fExisted || IsMine(tx) || IsFromMe(tx)) {
-        CWalletTx wtx(this, MakeTransactionRef(tx));
+        CWalletTx wtx(this, ptx);
 
         // Get merkle branch if transaction was found in a block.
         if (posInBlock != -1) {
             wtx.SetMerkleBranch(pIndex, posInBlock);
         }
 
         return AddToWallet(wtx, false);
     }
 
     return false;
 }
 
 bool CWallet::AbandonTransaction(const uint256 &hashTx) {
     LOCK2(cs_main, cs_wallet);
 
     CWalletDB walletdb(strWalletFile, "r+");
 
     std::set<uint256> todo;
     std::set<uint256> done;
 
     // Can't mark abandoned if confirmed or in mempool.
     assert(mapWallet.count(hashTx));
     CWalletTx &origtx = mapWallet[hashTx];
     if (origtx.GetDepthInMainChain() > 0 || origtx.InMempool()) {
         return false;
     }
 
     todo.insert(hashTx);
 
     while (!todo.empty()) {
         uint256 now = *todo.begin();
         todo.erase(now);
         done.insert(now);
         assert(mapWallet.count(now));
         CWalletTx &wtx = mapWallet[now];
         int currentconfirm = wtx.GetDepthInMainChain();
         // If the orig tx was not in block, none of its spends can be.
         assert(currentconfirm <= 0);
         // If currentconfirm < 0, the tx and its spends are already
         // conflicted; no need to abandon.
         if (currentconfirm == 0 && !wtx.isAbandoned()) {
             // If the orig tx was not in block/mempool, none of its spends can
             // be in mempool.
             assert(!wtx.InMempool());
             wtx.nIndex = -1;
             wtx.setAbandoned();
             wtx.MarkDirty();
             walletdb.WriteTx(wtx);
             NotifyTransactionChanged(this, wtx.GetId(), CT_UPDATED);
             // Iterate over all its outputs, and mark transactions in the wallet
             // that spend them abandoned too.
             TxSpends::const_iterator iter =
                 mapTxSpends.lower_bound(COutPoint(hashTx, 0));
             while (iter != mapTxSpends.end() && iter->first.hash == now) {
                 if (!done.count(iter->second)) {
                     todo.insert(iter->second);
                 }
                 iter++;
             }
 
             // If a transaction changes 'conflicted' state, that changes the
             // balance available of the outputs it spends. So force those to be
             // recomputed.
             for (const CTxIn &txin : wtx.tx->vin) {
-                if (mapWallet.count(txin.prevout.hash))
+                if (mapWallet.count(txin.prevout.hash)) {
                     mapWallet[txin.prevout.hash].MarkDirty();
+                }
             }
         }
     }
 
     return true;
 }
 
 void CWallet::MarkConflicted(const uint256 &hashBlock, const uint256 &hashTx) {
     LOCK2(cs_main, cs_wallet);
 
     int conflictconfirms = 0;
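     // When the conflicting block is in the active chain, conflictconfirms
     // becomes its negated depth: -1 at the tip, -2 one block deeper, etc.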
     if (mapBlockIndex.count(hashBlock)) {
         CBlockIndex *pindex = mapBlockIndex[hashBlock];
         if (chainActive.Contains(pindex)) {
             conflictconfirms = -(chainActive.Height() - pindex->nHeight + 1);
         }
     }
 
     // If the number of conflict confirms cannot be determined, the block is
     // still unknown or not yet part of the main chain, for example when
     // loading the wallet during a reindex. Do nothing in that case.
     if (conflictconfirms >= 0) {
         return;
     }
 
-    // Do not flush the wallet here for performance reasons
+    // Do not flush the wallet here for performance reasons.
     CWalletDB walletdb(strWalletFile, "r+", false);
 
     std::set<uint256> todo;
     std::set<uint256> done;
 
     todo.insert(hashTx);
 
     while (!todo.empty()) {
         uint256 now = *todo.begin();
         todo.erase(now);
         done.insert(now);
         assert(mapWallet.count(now));
         CWalletTx &wtx = mapWallet[now];
         int currentconfirm = wtx.GetDepthInMainChain();
         if (conflictconfirms < currentconfirm) {
             // Block is 'more conflicted' than current confirm; update.
             // Mark transaction as conflicted with this block.
             wtx.nIndex = -1;
             wtx.hashBlock = hashBlock;
             wtx.MarkDirty();
             walletdb.WriteTx(wtx);
             // Iterate over all its outputs, and mark transactions in the wallet
             // that spend them conflicted too.
             TxSpends::const_iterator iter =
                 mapTxSpends.lower_bound(COutPoint(now, 0));
             while (iter != mapTxSpends.end() && iter->first.hash == now) {
                 if (!done.count(iter->second)) {
                     todo.insert(iter->second);
                 }
                 iter++;
             }
 
             // If a transaction changes 'conflicted' state, that changes the
             // balance available of the outputs it spends. So force those to be
             // recomputed.
             for (const CTxIn &txin : wtx.tx->vin) {
                 if (mapWallet.count(txin.prevout.hash)) {
                     mapWallet[txin.prevout.hash].MarkDirty();
                 }
             }
         }
     }
 }
 
-void CWallet::SyncTransaction(const CTransaction &tx, const CBlockIndex *pindex,
+void CWallet::SyncTransaction(const CTransactionRef &ptx,
+                              const CBlockIndex *pindexBlockConnected,
                               int posInBlock) {
-    LOCK2(cs_main, cs_wallet);
+    const CTransaction &tx = *ptx;
 
-    if (!AddToWalletIfInvolvingMe(tx, pindex, posInBlock, true)) {
+    if (!AddToWalletIfInvolvingMe(ptx, pindexBlockConnected, posInBlock,
+                                  true)) {
         // Not one of ours
         return;
     }
 
     // If a transaction changes 'conflicted' state, that changes the balance
     // available of the outputs it spends. So force those to be recomputed,
     // also:
     for (const CTxIn &txin : tx.vin) {
-        if (mapWallet.count(txin.prevout.hash))
+        if (mapWallet.count(txin.prevout.hash)) {
             mapWallet[txin.prevout.hash].MarkDirty();
+        }
+    }
+}
+
+void CWallet::TransactionAddedToMempool(const CTransactionRef &ptx) {
+    LOCK2(cs_main, cs_wallet);
+    SyncTransaction(ptx, nullptr, -1);
+}
+
+void CWallet::BlockConnected(
+    const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex,
+    const std::vector<CTransactionRef> &vtxConflicted) {
+    LOCK2(cs_main, cs_wallet);
+    // TODO: Temporarily ensure that mempool removals are notified before
+    // connected transactions. This shouldn't matter, but the abandoned state
+    // of transactions in our wallet is currently cleared when we receive
+    // another notification, and there is a race condition where notification
+    // of a connected conflict might cause an outside process to abandon a
+    // transaction and then have it inadvertently cleared by the notification
+    // that the conflicted transaction was evicted.
+
+    for (const CTransactionRef &ptx : vtxConflicted) {
+        SyncTransaction(ptx, nullptr, -1);
+    }
+    for (size_t i = 0; i < pblock->vtx.size(); i++) {
+        SyncTransaction(pblock->vtx[i], pindex, i);
+    }
+
+    // The GUI expects a NotifyTransactionChanged when a coinbase tx
+    // that is in our wallet moves from in-the-best-block to
+    // 2-confirmations (as it only displays coinbases at that time).
+    // We do that here.
+    if (hashPrevBestCoinbase.IsNull()) {
+        // Immediately after restart we have no idea what the coinbase
+        // transaction from the previous block is.
+        // For correctness we scan over the entire wallet, looking for
+        // the previous block's coinbase, just in case it is ours, so
+        // that we can notify the UI that it should now be displayed.
+        if (pindex->pprev) {
+            for (const std::pair<uint256, CWalletTx> &p : mapWallet) {
+                if (p.second.IsCoinBase() &&
+                    p.second.hashBlock == pindex->pprev->GetBlockHash()) {
+                    NotifyTransactionChanged(this, p.first, CT_UPDATED);
+                    break;
+                }
+            }
+        }
+    } else {
+        std::map<uint256, CWalletTx>::const_iterator mi =
+            mapWallet.find(hashPrevBestCoinbase);
+        if (mi != mapWallet.end()) {
+            NotifyTransactionChanged(this, hashPrevBestCoinbase, CT_UPDATED);
+        }
+    }
+
+    hashPrevBestCoinbase = pblock->vtx[0]->GetHash();
+}
+
+void CWallet::BlockDisconnected(const std::shared_ptr<const CBlock> &pblock) {
+    LOCK2(cs_main, cs_wallet);
+
+    for (const CTransactionRef &ptx : pblock->vtx) {
+        SyncTransaction(ptx, nullptr, -1);
     }
 }
 
 isminetype CWallet::IsMine(const CTxIn &txin) const {
     LOCK(cs_wallet);
     std::map<uint256, CWalletTx>::const_iterator mi =
         mapWallet.find(txin.prevout.hash);
     if (mi != mapWallet.end()) {
         const CWalletTx &prev = (*mi).second;
         if (txin.prevout.n < prev.tx->vout.size()) {
             return IsMine(prev.tx->vout[txin.prevout.n]);
         }
     }
 
     return ISMINE_NO;
 }
 
 // Note that this function doesn't distinguish between a 0-valued input and a
 // not-"is mine" (according to the filter) input.
 Amount CWallet::GetDebit(const CTxIn &txin, const isminefilter &filter) const {
     LOCK(cs_wallet);
     std::map<uint256, CWalletTx>::const_iterator mi =
         mapWallet.find(txin.prevout.hash);
     if (mi != mapWallet.end()) {
         const CWalletTx &prev = (*mi).second;
         if (txin.prevout.n < prev.tx->vout.size()) {
             if (IsMine(prev.tx->vout[txin.prevout.n]) & filter) {
                 return prev.tx->vout[txin.prevout.n].nValue;
             }
         }
     }
 
     return Amount(0);
 }
 
 isminetype CWallet::IsMine(const CTxOut &txout) const {
     return ::IsMine(*this, txout.scriptPubKey);
 }
 
 Amount CWallet::GetCredit(const CTxOut &txout,
                           const isminefilter &filter) const {
     if (!MoneyRange(txout.nValue)) {
         throw std::runtime_error(std::string(__func__) +
                                  ": value out of range");
     }
 
     return (IsMine(txout) & filter) ? txout.nValue : Amount(0);
 }
 
 bool CWallet::IsChange(const CTxOut &txout) const {
     // TODO: fix handling of 'change' outputs. The assumption is that any
     // payment to a script that is ours, but is not in the address book is
     // change. That assumption is likely to break when we implement
     // multisignature wallets that return change back into a
     // multi-signature-protected address; a better way of identifying which
     // outputs are 'the send' and which are 'the change' will need to be
     // implemented (maybe extend CWalletTx to remember which output, if any, was
     // change).
     if (::IsMine(*this, txout.scriptPubKey)) {
         CTxDestination address;
         if (!ExtractDestination(txout.scriptPubKey, address)) {
             return true;
         }
 
         LOCK(cs_wallet);
         if (!mapAddressBook.count(address)) {
             return true;
         }
     }
 
     return false;
 }
 
 Amount CWallet::GetChange(const CTxOut &txout) const {
     if (!MoneyRange(txout.nValue)) {
         throw std::runtime_error(std::string(__func__) +
                                  ": value out of range");
     }
 
     return (IsChange(txout) ? txout.nValue : Amount(0));
 }
 
 bool CWallet::IsMine(const CTransaction &tx) const {
     for (const CTxOut &txout : tx.vout) {
         if (IsMine(txout)) {
             return true;
         }
     }
 
     return false;
 }
 
 bool CWallet::IsFromMe(const CTransaction &tx) const {
     return GetDebit(tx, ISMINE_ALL) > Amount(0);
 }
 
 Amount CWallet::GetDebit(const CTransaction &tx,
                          const isminefilter &filter) const {
     Amount nDebit(0);
     for (const CTxIn &txin : tx.vin) {
         nDebit += GetDebit(txin, filter);
         if (!MoneyRange(nDebit)) {
             throw std::runtime_error(std::string(__func__) +
                                      ": value out of range");
         }
     }
 
     return nDebit;
 }
 
 bool CWallet::IsAllFromMe(const CTransaction &tx,
                           const isminefilter &filter) const {
     LOCK(cs_wallet);
 
     for (const CTxIn &txin : tx.vin) {
         auto mi = mapWallet.find(txin.prevout.hash);
         if (mi == mapWallet.end()) {
             // Any unknown inputs can't be from us.
             return false;
         }
 
         const CWalletTx &prev = (*mi).second;
 
         if (txin.prevout.n >= prev.tx->vout.size()) {
             // Invalid input!
             return false;
         }
 
         if (!(IsMine(prev.tx->vout[txin.prevout.n]) & filter)) {
             return false;
         }
     }
 
     return true;
 }
 
 Amount CWallet::GetCredit(const CTransaction &tx,
                           const isminefilter &filter) const {
     Amount nCredit(0);
     for (const CTxOut &txout : tx.vout) {
         nCredit += GetCredit(txout, filter);
         if (!MoneyRange(nCredit)) {
             throw std::runtime_error(std::string(__func__) +
                                      ": value out of range");
         }
     }
 
     return nCredit;
 }
 
 Amount CWallet::GetChange(const CTransaction &tx) const {
     Amount nChange(0);
     for (const CTxOut &txout : tx.vout) {
         nChange += GetChange(txout);
         if (!MoneyRange(nChange)) {
             throw std::runtime_error(std::string(__func__) +
                                      ": value out of range");
         }
     }
 
     return nChange;
 }
 
 CPubKey CWallet::GenerateNewHDMasterKey() {
     CKey key;
     key.MakeNewKey(true);
 
     int64_t nCreationTime = GetTime();
     CKeyMetadata metadata(nCreationTime);
 
     // Calculate the pubkey.
     CPubKey pubkey = key.GetPubKey();
     assert(key.VerifyPubKey(pubkey));
 
     // Set the HD keypath to "m" (master); the masterkeyid refers to itself.
     metadata.hdKeypath = "m";
     metadata.hdMasterKeyID = pubkey.GetID();
 
     LOCK(cs_wallet);
 
     // Store the metadata in memory.
     mapKeyMetadata[pubkey.GetID()] = metadata;
 
     // Write the key and its metadata to the database.
     if (!AddKeyPubKey(key, pubkey)) {
         throw std::runtime_error(std::string(__func__) +
                                  ": AddKeyPubKey failed");
     }
 
     return pubkey;
 }
 
 bool CWallet::SetHDMasterKey(const CPubKey &pubkey,
                              CHDChain *possibleOldChain) {
     LOCK(cs_wallet);
 
     // Store the keyid (hash160) together with the child index counter in the
     // database as a hdchain object.
     CHDChain newHdChain;
     if (possibleOldChain) {
         // Preserve the old chain's version.
         newHdChain.nVersion = possibleOldChain->nVersion;
     }
     newHdChain.masterKeyID = pubkey.GetID();
     SetHDChain(newHdChain, false);
 
     return true;
 }
 
 bool CWallet::SetHDChain(const CHDChain &chain, bool memonly) {
     LOCK(cs_wallet);
     if (!memonly && !CWalletDB(strWalletFile).WriteHDChain(chain)) {
         throw std::runtime_error(std::string(__func__) +
                                  ": writing chain failed");
     }
 
     hdChain = chain;
     return true;
 }
 
 bool CWallet::IsHDEnabled() {
     return !hdChain.masterKeyID.IsNull();
 }
 
 int64_t CWalletTx::GetTxTime() const {
     int64_t n = nTimeSmart;
     return n ? n : nTimeReceived;
 }
 
 int CWalletTx::GetRequestCount() const {
     LOCK(pwallet->cs_wallet);
 
     // Returns -1 if it wasn't being tracked.
     int nRequests = -1;
 
     if (IsCoinBase()) {
         // Generated block.
         if (!hashUnset()) {
             std::map<uint256, int>::const_iterator mi =
                 pwallet->mapRequestCount.find(hashBlock);
             if (mi != pwallet->mapRequestCount.end()) {
                 nRequests = (*mi).second;
             }
         }
     } else {
         // Did anyone request this transaction?
         std::map<uint256, int>::const_iterator mi =
             pwallet->mapRequestCount.find(GetId());
         if (mi != pwallet->mapRequestCount.end()) {
             nRequests = (*mi).second;
 
             // How about the block it's in?
             if (nRequests == 0 && !hashUnset()) {
                 std::map<uint256, int>::const_iterator _mi =
                     pwallet->mapRequestCount.find(hashBlock);
                 if (_mi != pwallet->mapRequestCount.end()) {
                     nRequests = (*_mi).second;
                 } else {
                     // If it's in someone else's block it must have got out.
                     nRequests = 1;
                 }
             }
         }
     }
 
     return nRequests;
 }
 
 void CWalletTx::GetAmounts(std::list<COutputEntry> &listReceived,
                            std::list<COutputEntry> &listSent, Amount &nFee,
                            std::string &strSentAccount,
                            const isminefilter &filter) const {
     nFee = Amount(0);
     listReceived.clear();
     listSent.clear();
     strSentAccount = strFromAccount;
 
     // Compute fee:
     Amount nDebit = GetDebit(filter);
     // debit>0 means we signed/sent this transaction.
     if (nDebit > Amount(0)) {
         Amount nValueOut = tx->GetValueOut();
         nFee = (nDebit - nValueOut);
     }
 
     // Sent/received.
     for (unsigned int i = 0; i < tx->vout.size(); ++i) {
         const CTxOut &txout = tx->vout[i];
         isminetype fIsMine = pwallet->IsMine(txout);
         // Only need to handle txouts if AT LEAST one of these is true:
         //   1) they debit from us (sent)
         //   2) the output is to us (received)
         if (nDebit > Amount(0)) {
             // Don't report 'change' txouts
             if (pwallet->IsChange(txout)) {
                 continue;
             }
         } else if (!(fIsMine & filter)) {
             continue;
         }
 
         // In either case, we need to get the destination address.
         CTxDestination address;
 
         if (!ExtractDestination(txout.scriptPubKey, address) &&
             !txout.scriptPubKey.IsUnspendable()) {
             LogPrintf("CWalletTx::GetAmounts: Unknown transaction type found, "
                       "txid %s\n",
                       this->GetId().ToString());
             address = CNoDestination();
         }
 
         COutputEntry output = {address, txout.nValue, (int)i};
 
         // If we are debited by the transaction, add the output as a "sent"
         // entry.
         if (nDebit > Amount(0)) {
             listSent.push_back(output);
         }
 
         // If we are receiving the output, add it as a "received" entry.
         if (fIsMine & filter) {
             listReceived.push_back(output);
         }
     }
 }
 
 void CWalletTx::GetAccountAmounts(const std::string &strAccount,
                                   Amount &nReceived, Amount &nSent,
                                   Amount &nFee,
                                   const isminefilter &filter) const {
     nReceived = nSent = nFee = Amount(0);
 
     Amount allFee;
     std::string strSentAccount;
     std::list<COutputEntry> listReceived;
     std::list<COutputEntry> listSent;
     GetAmounts(listReceived, listSent, allFee, strSentAccount, filter);
 
     if (strAccount == strSentAccount) {
         for (const COutputEntry &s : listSent) {
             nSent += s.amount;
         }
         nFee = allFee;
     }
 
     LOCK(pwallet->cs_wallet);
     for (const COutputEntry &r : listReceived) {
         if (pwallet->mapAddressBook.count(r.destination)) {
             std::map<CTxDestination, CAddressBookData>::const_iterator mi =
                 pwallet->mapAddressBook.find(r.destination);
             if (mi != pwallet->mapAddressBook.end() &&
                 (*mi).second.name == strAccount) {
                 nReceived += r.amount;
             }
         } else if (strAccount.empty()) {
             nReceived += r.amount;
         }
     }
 }
 
 /**
  * Scan the block chain (starting in pindexStart) for transactions from or to
  * us. If fUpdate is true, found transactions that already exist in the wallet
  * will be updated.
  *
  * Returns pointer to the first block in the last contiguous range that was
  * successfully scanned or elided (elided if pindexStart points at a block
  * before CWallet::nTimeFirstKey). Returns null if there is no such range, or
  * the range doesn't include chainActive.Tip().
  */
 CBlockIndex *CWallet::ScanForWalletTransactions(CBlockIndex *pindexStart,
                                                 bool fUpdate) {
     LOCK2(cs_main, cs_wallet);
 
     int64_t nNow = GetTime();
     const CChainParams &chainParams = Params();
 
     CBlockIndex *pindex = pindexStart;
     CBlockIndex *ret = pindexStart;
 
     // No need to read and scan the block if it was created before our wallet
     // birthday (as adjusted for block time variability).
     while (pindex && nTimeFirstKey &&
            (pindex->GetBlockTime() < (nTimeFirstKey - 7200))) {
         pindex = chainActive.Next(pindex);
     }
 
     // Show rescan progress in GUI as dialog or on splashscreen, if -rescan on
     // startup.
     ShowProgress(_("Rescanning..."), 0);
     double dProgressStart =
         GuessVerificationProgress(chainParams.TxData(), pindex);
     double dProgressTip =
         GuessVerificationProgress(chainParams.TxData(), chainActive.Tip());
     while (pindex) {
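         // Report rescan progress as the fraction of verification work
         // between the starting point and the tip, clamped to 1-99 and
         // updated every 100 blocks.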
         if (pindex->nHeight % 100 == 0 && dProgressTip - dProgressStart > 0.0) {
             ShowProgress(
                 _("Rescanning..."),
                 std::max(1,
                          std::min(99, (int)((GuessVerificationProgress(
                                                  chainParams.TxData(), pindex) -
                                              dProgressStart) /
                                             (dProgressTip - dProgressStart) *
                                             100))));
         }
 
         CBlock block;
         if (ReadBlockFromDisk(block, pindex, GetConfig())) {
             for (size_t posInBlock = 0; posInBlock < block.vtx.size();
                  ++posInBlock) {
-                AddToWalletIfInvolvingMe(*block.vtx[posInBlock], pindex,
+                AddToWalletIfInvolvingMe(block.vtx[posInBlock], pindex,
                                          posInBlock, fUpdate);
             }
-
             if (!ret) {
                 ret = pindex;
             }
         } else {
             ret = nullptr;
         }
 
         pindex = chainActive.Next(pindex);
         if (GetTime() >= nNow + 60) {
             nNow = GetTime();
             LogPrintf("Still rescanning. At block %d. Progress=%f\n",
                       pindex->nHeight,
                       GuessVerificationProgress(chainParams.TxData(), pindex));
         }
     }
 
     // Hide progress dialog in GUI.
     ShowProgress(_("Rescanning..."), 100);
 
     return ret;
 }
 
 void CWallet::ReacceptWalletTransactions() {
     // If transactions aren't being broadcast, don't let them into the local
     // mempool either.
     if (!fBroadcastTransactions) {
         return;
     }
 
     LOCK2(cs_main, cs_wallet);
     std::map<int64_t, CWalletTx *> mapSorted;
 
     // Sort pending wallet transactions based on their initial wallet insertion
     // order.
     for (std::pair<const uint256, CWalletTx> &item : mapWallet) {
         const uint256 &wtxid = item.first;
         CWalletTx &wtx = item.second;
         assert(wtx.GetId() == wtxid);
 
         int nDepth = wtx.GetDepthInMainChain();
 
         if (!wtx.IsCoinBase() && (nDepth == 0 && !wtx.isAbandoned())) {
             mapSorted.insert(std::make_pair(wtx.nOrderPos, &wtx));
         }
     }
 
     // Try to add wallet transactions to memory pool.
     for (std::pair<const int64_t, CWalletTx *> &item : mapSorted) {
         CWalletTx &wtx = *(item.second);
 
         LOCK(mempool.cs);
         CValidationState state;
         wtx.AcceptToMemoryPool(maxTxFee, state);
     }
 }
 
 bool CWalletTx::RelayWalletTransaction(CConnman *connman) {
     assert(pwallet->GetBroadcastTransactions());
     if (IsCoinBase() || isAbandoned() || GetDepthInMainChain() != 0) {
         return false;
     }
 
     CValidationState state;
     // GetDepthInMainChain already catches known conflicts.
     if (InMempool() || AcceptToMemoryPool(maxTxFee, state)) {
         LogPrintf("Relaying wtx %s\n", GetId().ToString());
         if (connman) {
             CInv inv(MSG_TX, GetId());
             connman->ForEachNode(
                 [&inv](CNode *pnode) { pnode->PushInventory(inv); });
             return true;
         }
     }
 
     return false;
 }
 
 std::set<uint256> CWalletTx::GetConflicts() const {
     std::set<uint256> result;
     if (pwallet != nullptr) {
         uint256 myHash = GetId();
         result = pwallet->GetConflicts(myHash);
         result.erase(myHash);
     }
 
     return result;
 }
 
 Amount CWalletTx::GetDebit(const isminefilter &filter) const {
     if (tx->vin.empty()) return Amount(0);
 
     Amount debit(0);
     if (filter & ISMINE_SPENDABLE) {
         if (fDebitCached) {
             debit += nDebitCached;
         } else {
             nDebitCached = pwallet->GetDebit(*this, ISMINE_SPENDABLE);
             fDebitCached = true;
             debit += nDebitCached;
         }
     }
 
     if (filter & ISMINE_WATCH_ONLY) {
         if (fWatchDebitCached) {
             debit += nWatchDebitCached;
         } else {
             nWatchDebitCached = pwallet->GetDebit(*this, ISMINE_WATCH_ONLY);
             fWatchDebitCached = true;
             debit += Amount(nWatchDebitCached);
         }
     }
 
     return debit;
 }
 
 Amount CWalletTx::GetCredit(const isminefilter &filter) const {
     // Must wait until coinbase is safely deep enough in the chain before
     // valuing it.
     if (IsCoinBase() && GetBlocksToMaturity() > 0) {
         return Amount(0);
     }
 
     Amount credit(0);
     if (filter & ISMINE_SPENDABLE) {
         // GetBalance can assume transactions in mapWallet won't change.
         if (fCreditCached) {
             credit += nCreditCached;
         } else {
             nCreditCached = pwallet->GetCredit(*this, ISMINE_SPENDABLE);
             fCreditCached = true;
             credit += nCreditCached;
         }
     }
 
     if (filter & ISMINE_WATCH_ONLY) {
         if (fWatchCreditCached) {
             credit += nWatchCreditCached;
         } else {
             nWatchCreditCached = pwallet->GetCredit(*this, ISMINE_WATCH_ONLY);
             fWatchCreditCached = true;
             credit += nWatchCreditCached;
         }
     }
 
     return credit;
 }
 
 Amount CWalletTx::GetImmatureCredit(bool fUseCache) const {
     if (IsCoinBase() && GetBlocksToMaturity() > 0 && IsInMainChain()) {
         if (fUseCache && fImmatureCreditCached) return nImmatureCreditCached;
         nImmatureCreditCached = pwallet->GetCredit(*this, ISMINE_SPENDABLE);
         fImmatureCreditCached = true;
         return nImmatureCreditCached;
     }
 
     return Amount(0);
 }
 
 Amount CWalletTx::GetAvailableCredit(bool fUseCache) const {
     if (pwallet == 0) {
         return Amount(0);
     }
 
     // Must wait until coinbase is safely deep enough in the chain before
     // valuing it.
     if (IsCoinBase() && GetBlocksToMaturity() > 0) {
         return Amount(0);
     }
 
     if (fUseCache && fAvailableCreditCached) {
         return nAvailableCreditCached;
     }
 
     Amount nCredit(0);
     uint256 hashTx = GetId();
     for (unsigned int i = 0; i < tx->vout.size(); i++) {
         if (!pwallet->IsSpent(hashTx, i)) {
             const CTxOut &txout = tx->vout[i];
             nCredit += pwallet->GetCredit(txout, ISMINE_SPENDABLE);
             if (!MoneyRange(nCredit)) {
                 throw std::runtime_error(
                     "CWalletTx::GetAvailableCredit() : value out of range");
             }
         }
     }
 
     nAvailableCreditCached = nCredit;
     fAvailableCreditCached = true;
     return nCredit;
 }
 
 Amount CWalletTx::GetImmatureWatchOnlyCredit(const bool &fUseCache) const {
     if (IsCoinBase() && GetBlocksToMaturity() > 0 && IsInMainChain()) {
         if (fUseCache && fImmatureWatchCreditCached) {
             return nImmatureWatchCreditCached;
         }
 
         nImmatureWatchCreditCached =
             pwallet->GetCredit(*this, ISMINE_WATCH_ONLY);
         fImmatureWatchCreditCached = true;
         return nImmatureWatchCreditCached;
     }
 
     return Amount(0);
 }
 
 Amount CWalletTx::GetAvailableWatchOnlyCredit(const bool &fUseCache) const {
     if (pwallet == 0) {
         return Amount(0);
     }
 
     // Must wait until coinbase is safely deep enough in the chain before
     // valuing it.
     if (IsCoinBase() && GetBlocksToMaturity() > 0) {
         return Amount(0);
     }
 
     if (fUseCache && fAvailableWatchCreditCached) {
         return nAvailableWatchCreditCached;
     }
 
     Amount nCredit(0);
     for (unsigned int i = 0; i < tx->vout.size(); i++) {
         if (!pwallet->IsSpent(GetId(), i)) {
             const CTxOut &txout = tx->vout[i];
             nCredit += pwallet->GetCredit(txout, ISMINE_WATCH_ONLY);
             if (!MoneyRange(nCredit)) {
                 throw std::runtime_error(
                     "CWalletTx::GetAvailableWatchOnlyCredit() : value out "
                     "of range");
             }
         }
     }
 
     nAvailableWatchCreditCached = nCredit;
     fAvailableWatchCreditCached = true;
     return nCredit;
 }
 
 Amount CWalletTx::GetChange() const {
     if (fChangeCached) {
         return nChangeCached;
     }
 
     nChangeCached = pwallet->GetChange(*this);
     fChangeCached = true;
     return nChangeCached;
 }
 
 bool CWalletTx::InMempool() const {
     LOCK(mempool.cs);
     if (mempool.exists(GetId())) {
         return true;
     }
 
     return false;
 }
 
 bool CWalletTx::IsTrusted() const {
     // Quick answer in most cases
     if (!CheckFinalTx(*this)) {
         return false;
     }
 
     int nDepth = GetDepthInMainChain();
     if (nDepth >= 1) {
         return true;
     }
 
     if (nDepth < 0) {
         return false;
     }
 
     // using wtx's cached debit
     if (!bSpendZeroConfChange || !IsFromMe(ISMINE_ALL)) {
         return false;
     }
 
     // Don't trust unconfirmed transactions from us unless they are in the
     // mempool.
     if (!InMempool()) {
         return false;
     }
 
     // Trusted if all inputs are from us and are in the mempool:
     for (const CTxIn &txin : tx->vin) {
         // Transactions not sent by us: not trusted
         const CWalletTx *parent = pwallet->GetWalletTx(txin.prevout.hash);
         if (parent == nullptr) {
             return false;
         }
 
         const CTxOut &parentOut = parent->tx->vout[txin.prevout.n];
         if (pwallet->IsMine(parentOut) != ISMINE_SPENDABLE) {
             return false;
         }
     }
 
     return true;
 }
 
 bool CWalletTx::IsEquivalentTo(const CWalletTx &_tx) const {
     CMutableTransaction tx1 = *this->tx;
     CMutableTransaction tx2 = *_tx.tx;
     for (unsigned int i = 0; i < tx1.vin.size(); i++) {
         tx1.vin[i].scriptSig = CScript();
     }
 
     for (unsigned int i = 0; i < tx2.vin.size(); i++) {
         tx2.vin[i].scriptSig = CScript();
     }
 
     return CTransaction(tx1) == CTransaction(tx2);
 }
 
 std::vector<uint256>
 CWallet::ResendWalletTransactionsBefore(int64_t nTime, CConnman *connman) {
     std::vector<uint256> result;
 
     LOCK(cs_wallet);
     // Sort them in chronological order
     std::multimap<unsigned int, CWalletTx *> mapSorted;
     for (std::pair<const uint256, CWalletTx> &item : mapWallet) {
         CWalletTx &wtx = item.second;
         // Don't rebroadcast if newer than nTime:
         if (wtx.nTimeReceived > nTime) {
             continue;
         }
 
         mapSorted.insert(std::make_pair(wtx.nTimeReceived, &wtx));
     }
 
     for (std::pair<const unsigned int, CWalletTx *> &item : mapSorted) {
         CWalletTx &wtx = *item.second;
         if (wtx.RelayWalletTransaction(connman)) {
             result.push_back(wtx.GetId());
         }
     }
 
     return result;
 }
 
 void CWallet::ResendWalletTransactions(int64_t nBestBlockTime,
                                        CConnman *connman) {
     // Do this infrequently and randomly to avoid giving away that these are our
     // transactions.
     if (GetTime() < nNextResend || !fBroadcastTransactions) {
         return;
     }
 
     bool fFirst = (nNextResend == 0);
     nNextResend = GetTime() + GetRand(30 * 60);
     if (fFirst) {
         return;
     }
 
     // Only do it if there's been a new block since last time
     if (nBestBlockTime < nLastResend) {
         return;
     }
 
     nLastResend = GetTime();
 
     // Rebroadcast unconfirmed txes older than 5 minutes before the last block
     // was found:
     std::vector<uint256> relayed =
         ResendWalletTransactionsBefore(nBestBlockTime - 5 * 60, connman);
     if (!relayed.empty()) {
         LogPrintf("%s: rebroadcast %u unconfirmed transactions\n", __func__,
                   relayed.size());
     }
 }
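 
 // Illustrative timing for the scheduler above: nNextResend is drawn uniformly
 // from the next 30 minutes, so rebroadcast attempts fire roughly every 15
 // minutes on average; the first scheduled attempt after startup is skipped,
 // and nothing is relayed unless a block newer than the previous resend has
 // been found.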
 
 /** @} */ // end of mapWallet
 
 /**
  * @defgroup Actions
  *
  * @{
  */
 Amount CWallet::GetBalance() const {
     LOCK2(cs_main, cs_wallet);
 
     Amount nTotal(0);
     for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin();
          it != mapWallet.end(); ++it) {
         const CWalletTx *pcoin = &(*it).second;
         if (pcoin->IsTrusted()) {
             nTotal += pcoin->GetAvailableCredit();
         }
     }
 
     return nTotal;
 }
 
 Amount CWallet::GetUnconfirmedBalance() const {
     LOCK2(cs_main, cs_wallet);
 
     Amount nTotal(0);
     for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin();
          it != mapWallet.end(); ++it) {
         const CWalletTx *pcoin = &(*it).second;
         if (!pcoin->IsTrusted() && pcoin->GetDepthInMainChain() == 0 &&
             pcoin->InMempool()) {
             nTotal += pcoin->GetAvailableCredit();
         }
     }
 
     return nTotal;
 }
 
 Amount CWallet::GetImmatureBalance() const {
     LOCK2(cs_main, cs_wallet);
 
     Amount nTotal(0);
     for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin();
          it != mapWallet.end(); ++it) {
         const CWalletTx *pcoin = &(*it).second;
         nTotal += pcoin->GetImmatureCredit();
     }
 
     return nTotal;
 }
 
 Amount CWallet::GetWatchOnlyBalance() const {
     LOCK2(cs_main, cs_wallet);
 
     Amount nTotal(0);
     for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin();
          it != mapWallet.end(); ++it) {
         const CWalletTx *pcoin = &(*it).second;
         if (pcoin->IsTrusted()) {
             nTotal += pcoin->GetAvailableWatchOnlyCredit();
         }
     }
 
     return nTotal;
 }
 
 Amount CWallet::GetUnconfirmedWatchOnlyBalance() const {
     LOCK2(cs_main, cs_wallet);
 
     Amount nTotal(0);
     for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin();
          it != mapWallet.end(); ++it) {
         const CWalletTx *pcoin = &(*it).second;
         if (!pcoin->IsTrusted() && pcoin->GetDepthInMainChain() == 0 &&
             pcoin->InMempool()) {
             nTotal += pcoin->GetAvailableWatchOnlyCredit();
         }
     }
 
     return nTotal;
 }
 
 Amount CWallet::GetImmatureWatchOnlyBalance() const {
     LOCK2(cs_main, cs_wallet);
 
     Amount nTotal(0);
     for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin();
          it != mapWallet.end(); ++it) {
         const CWalletTx *pcoin = &(*it).second;
         nTotal += pcoin->GetImmatureWatchOnlyCredit();
     }
 
     return nTotal;
 }
 
 void CWallet::AvailableCoins(std::vector<COutput> &vCoins, bool fOnlyConfirmed,
                              const CCoinControl *coinControl,
                              bool fIncludeZeroValue) const {
     vCoins.clear();
 
     LOCK2(cs_main, cs_wallet);
     for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin();
          it != mapWallet.end(); ++it) {
         const uint256 &wtxid = it->first;
         const CWalletTx *pcoin = &(*it).second;
 
         if (!CheckFinalTx(*pcoin)) {
             continue;
         }
 
         if (fOnlyConfirmed && !pcoin->IsTrusted()) {
             continue;
         }
 
         if (pcoin->IsCoinBase() && pcoin->GetBlocksToMaturity() > 0) {
             continue;
         }
 
         int nDepth = pcoin->GetDepthInMainChain();
         if (nDepth < 0) {
             continue;
         }
 
         // We should not consider coins which aren't at least in our mempool.
         // It's possible for these to be conflicted via ancestors which we may
         // never be able to detect.
         if (nDepth == 0 && !pcoin->InMempool()) {
             continue;
         }
 
         // Bitcoin-ABC: Removed check that prevents consideration of coins from
         // transactions that are replacing other transactions. This check based
         // on pcoin->mapValue.count("replaces_txid") which was not being set
         // anywhere.
 
         // Similarly, we should not consider coins from transactions that have
         // been replaced. In the example above, we would want to prevent
         // creation of a transaction A' spending an output of A, because if
         // transaction B were initially confirmed, conflicting with A and A', we
         // wouldn't want the user to create a transaction D intending to
         // replace A', but potentially resulting in a scenario where A, A', and
         // D could all be accepted (instead of just B and D, or just A and A'
         // like the user would want).
 
         // Bitcoin-ABC: retained this check as 'replaced_by_txid' is still set
         // in the wallet code.
         if (nDepth == 0 && fOnlyConfirmed &&
             pcoin->mapValue.count("replaced_by_txid")) {
             continue;
         }
 
         for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++) {
             isminetype mine = IsMine(pcoin->tx->vout[i]);
             if (!(IsSpent(wtxid, i)) && mine != ISMINE_NO &&
                 !IsLockedCoin((*it).first, i) &&
                 (pcoin->tx->vout[i].nValue > Amount(0) || fIncludeZeroValue) &&
                 (!coinControl || !coinControl->HasSelected() ||
                  coinControl->fAllowOtherInputs ||
                  coinControl->IsSelected(COutPoint((*it).first, i)))) {
                 vCoins.push_back(COutput(
                     pcoin, i, nDepth,
                     ((mine & ISMINE_SPENDABLE) != ISMINE_NO) ||
                         (coinControl && coinControl->fAllowWatchOnly &&
                          (mine & ISMINE_WATCH_SOLVABLE) != ISMINE_NO),
                     (mine & (ISMINE_SPENDABLE | ISMINE_WATCH_SOLVABLE)) !=
                         ISMINE_NO));
             }
         }
     }
 }
 
 static void ApproximateBestSubset(
     std::vector<std::pair<Amount, std::pair<const CWalletTx *, unsigned int>>>
         vValue,
     const Amount nTotalLower, const Amount nTargetValue,
     std::vector<char> &vfBest, Amount &nBest, int iterations = 1000) {
     std::vector<char> vfIncluded;
 
     vfBest.assign(vValue.size(), true);
     nBest = nTotalLower;
 
     FastRandomContext insecure_rand;
 
     for (int nRep = 0; nRep < iterations && nBest != nTargetValue; nRep++) {
         vfIncluded.assign(vValue.size(), false);
         Amount nTotal(0);
         bool fReachedTarget = false;
         for (int nPass = 0; nPass < 2 && !fReachedTarget; nPass++) {
             for (size_t i = 0; i < vValue.size(); i++) {
                 // The solver here uses a randomized algorithm; the randomness
                 // serves no real security purpose but is needed to prevent
                 // degenerate behavior, and it is important that the RNG is
                 // fast. We do not use a constant random sequence, because
                 // there may be some privacy improvement by making the
                 // selection random.
                 if (nPass == 0 ? insecure_rand.randbool() : !vfIncluded[i]) {
                     nTotal += vValue[i].first;
                     vfIncluded[i] = true;
                     if (nTotal >= nTargetValue) {
                         fReachedTarget = true;
                         if (nTotal < nBest) {
                             nBest = nTotal;
                             vfBest = vfIncluded;
                         }
 
                         nTotal -= vValue[i].first;
                         vfIncluded[i] = false;
                     }
                 }
             }
         }
     }
 }
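 
 // Worked example of the solver above (hypothetical values, in satoshis): with
 // candidate values {4000, 3000, 2000} and a target of 6000, nBest starts at
 // nTotalLower = 9000. A single pass might include 4000, then 3000 (total 7000
 // >= 6000, recorded as the new nBest and immediately backed out), then 2000
 // (total 6000, an exact hit), after which the outer loop stops early because
 // nBest == nTargetValue, leaving vfBest marking {4000, 2000}.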
 
 bool CWallet::SelectCoinsMinConf(
     const Amount nTargetValue, const int nConfMine, const int nConfTheirs,
     const uint64_t nMaxAncestors, std::vector<COutput> vCoins,
     std::set<std::pair<const CWalletTx *, unsigned int>> &setCoinsRet,
     Amount &nValueRet) const {
     setCoinsRet.clear();
     nValueRet = Amount(0);
 
     // Lowest coin found so far that is larger than the target (if any).
     std::pair<Amount, std::pair<const CWalletTx *, unsigned int>>
         coinLowestLarger;
     coinLowestLarger.first = MAX_MONEY;
     coinLowestLarger.second.first = nullptr;
     // List of coins with values less than the target.
     std::vector<std::pair<Amount, std::pair<const CWalletTx *, unsigned int>>>
         vValue;
     Amount nTotalLower(0);
 
     random_shuffle(vCoins.begin(), vCoins.end(), GetRandInt);
 
     for (const COutput &output : vCoins) {
         if (!output.fSpendable) {
             continue;
         }
 
         const CWalletTx *pcoin = output.tx;
 
         if (output.nDepth <
             (pcoin->IsFromMe(ISMINE_ALL) ? nConfMine : nConfTheirs)) {
             continue;
         }
 
         if (!mempool.TransactionWithinChainLimit(pcoin->GetId(),
                                                  nMaxAncestors)) {
             continue;
         }
 
         int i = output.i;
         Amount n = pcoin->tx->vout[i].nValue;
 
         std::pair<Amount, std::pair<const CWalletTx *, unsigned int>> coin =
             std::make_pair(n, std::make_pair(pcoin, i));
 
         if (n == nTargetValue) {
             setCoinsRet.insert(coin.second);
             nValueRet += coin.first;
             return true;
         } else if (n < nTargetValue + MIN_CHANGE) {
             vValue.push_back(coin);
             nTotalLower += n;
         } else if (n < coinLowestLarger.first) {
             coinLowestLarger = coin;
         }
     }
 
     if (nTotalLower == nTargetValue) {
         for (unsigned int i = 0; i < vValue.size(); ++i) {
             setCoinsRet.insert(vValue[i].second);
             nValueRet += vValue[i].first;
         }
 
         return true;
     }
 
     if (nTotalLower < nTargetValue) {
         if (coinLowestLarger.second.first == nullptr) {
             return false;
         }
 
         setCoinsRet.insert(coinLowestLarger.second);
         nValueRet += coinLowestLarger.first;
         return true;
     }
 
     // Solve subset sum by stochastic approximation
     std::sort(vValue.begin(), vValue.end(), CompareValueOnly());
     std::reverse(vValue.begin(), vValue.end());
     std::vector<char> vfBest;
     Amount nBest;
 
     ApproximateBestSubset(vValue, nTotalLower, nTargetValue, vfBest, nBest);
     if (nBest != nTargetValue && nTotalLower >= nTargetValue + MIN_CHANGE) {
         ApproximateBestSubset(vValue, nTotalLower, nTargetValue + MIN_CHANGE,
                               vfBest, nBest);
     }
 
     // If we have a bigger coin and (either the stochastic approximation didn't
     // find a good solution, or the next bigger coin is closer), return the
     // bigger coin.
     if (coinLowestLarger.second.first &&
         ((nBest != nTargetValue && nBest < nTargetValue + MIN_CHANGE) ||
          coinLowestLarger.first <= nBest)) {
         setCoinsRet.insert(coinLowestLarger.second);
         nValueRet += coinLowestLarger.first;
     } else {
         for (unsigned int i = 0; i < vValue.size(); i++) {
             if (vfBest[i]) {
                 setCoinsRet.insert(vValue[i].second);
                 nValueRet += vValue[i].first;
             }
         }
 
         if (LogAcceptCategory(BCLog::SELECTCOINS)) {
             LogPrint(BCLog::SELECTCOINS, "SelectCoins() best subset: ");
             for (size_t i = 0; i < vValue.size(); i++) {
                 if (vfBest[i]) {
                     LogPrint(BCLog::SELECTCOINS, "%s ",
                              FormatMoney(vValue[i].first));
                 }
             }
             LogPrint(BCLog::SELECTCOINS, "total %s\n", FormatMoney(nBest));
         }
     }
 
     return true;
 }
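 
 // Worked example of the selection order above (hypothetical values, in
 // satoshis): for a target of 6000 and eligible coins worth 5000, 3000 and
 // 2000, there is no exact match and the smaller coins together (10000) exceed
 // the target, so the stochastic pass runs and typically settles on
 // {5000, 2000} = 7000, the cheapest subset reaching the target. If instead
 // only a single 8000-satoshi coin were eligible, nTotalLower (0) would fall
 // short of the target and that coin would be returned as coinLowestLarger.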
 
 bool CWallet::SelectCoins(
     const std::vector<COutput> &vAvailableCoins, const Amount nTargetValue,
     std::set<std::pair<const CWalletTx *, unsigned int>> &setCoinsRet,
     Amount &nValueRet, const CCoinControl *coinControl) const {
     std::vector<COutput> vCoins(vAvailableCoins);
 
     // coin control -> return all selected outputs (we want all selected to go
     // into the transaction for sure).
     if (coinControl && coinControl->HasSelected() &&
         !coinControl->fAllowOtherInputs) {
         for (const COutput &out : vCoins) {
             if (!out.fSpendable) {
                 continue;
             }
 
             nValueRet += out.tx->tx->vout[out.i].nValue;
             setCoinsRet.insert(std::make_pair(out.tx, out.i));
         }
 
         return (nValueRet >= nTargetValue);
     }
 
     // Calculate value from preset inputs and store them.
     std::set<std::pair<const CWalletTx *, uint32_t>> setPresetCoins;
     Amount nValueFromPresetInputs(0);
 
     std::vector<COutPoint> vPresetInputs;
     if (coinControl) {
         coinControl->ListSelected(vPresetInputs);
     }
 
     for (const COutPoint &outpoint : vPresetInputs) {
         std::map<uint256, CWalletTx>::const_iterator it =
             mapWallet.find(outpoint.hash);
         if (it == mapWallet.end()) {
             // TODO: Allow non-wallet inputs
             return false;
         }
 
         const CWalletTx *pcoin = &it->second;
         // Clearly invalid input, fail.
         if (pcoin->tx->vout.size() <= outpoint.n) {
             return false;
         }
 
         nValueFromPresetInputs += pcoin->tx->vout[outpoint.n].nValue;
         setPresetCoins.insert(std::make_pair(pcoin, outpoint.n));
     }
 
     // Remove preset inputs from vCoins.
     for (std::vector<COutput>::iterator it = vCoins.begin();
          it != vCoins.end() && coinControl && coinControl->HasSelected();) {
         if (setPresetCoins.count(std::make_pair(it->tx, it->i))) {
             it = vCoins.erase(it);
         } else {
             ++it;
         }
     }
 
     size_t nMaxChainLength =
         std::min(GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT),
                  GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT));
     bool fRejectLongChains = GetBoolArg("-walletrejectlongchains",
                                         DEFAULT_WALLET_REJECT_LONG_CHAINS);
 
     bool res =
         nTargetValue <= nValueFromPresetInputs ||
         SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 1, 6, 0,
                            vCoins, setCoinsRet, nValueRet) ||
         SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 1, 1, 0,
                            vCoins, setCoinsRet, nValueRet) ||
         (bSpendZeroConfChange &&
          SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1, 2,
                             vCoins, setCoinsRet, nValueRet)) ||
         (bSpendZeroConfChange &&
          SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1,
                             std::min((size_t)4, nMaxChainLength / 3), vCoins,
                             setCoinsRet, nValueRet)) ||
         (bSpendZeroConfChange &&
          SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1,
                             nMaxChainLength / 2, vCoins, setCoinsRet,
                             nValueRet)) ||
         (bSpendZeroConfChange &&
          SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1,
                             nMaxChainLength, vCoins, setCoinsRet, nValueRet)) ||
         (bSpendZeroConfChange && !fRejectLongChains &&
          SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, 0, 1,
                             std::numeric_limits<uint64_t>::max(), vCoins,
                             setCoinsRet, nValueRet));
 
     // Because SelectCoinsMinConf clears the setCoinsRet, we now add the
     // possible inputs to the coinset.
     setCoinsRet.insert(setPresetCoins.begin(), setPresetCoins.end());
 
     // Add preset inputs to the total value selected.
     nValueRet += nValueFromPresetInputs;
 
     return res;
 }
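 
 // The chain of SelectCoinsMinConf calls above is a fallback ladder: first
 // require 1 confirmation for our own coins and 6 for coins received from
 // others, then 1 and 1; only when -spendzeroconfchange is enabled are
 // unconfirmed outputs from our own transactions considered, with the allowed
 // mempool-ancestor count relaxed step by step (2, then min(4, limit / 3),
 // limit / 2, limit) and finally unbounded unless -walletrejectlongchains is
 // set.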
 
 bool CWallet::FundTransaction(CMutableTransaction &tx, Amount &nFeeRet,
                               bool overrideEstimatedFeeRate,
                               const CFeeRate &specificFeeRate,
                               int &nChangePosInOut, std::string &strFailReason,
                               bool includeWatching, bool lockUnspents,
                               const std::set<int> &setSubtractFeeFromOutputs,
                               bool keepReserveKey,
                               const CTxDestination &destChange) {
     std::vector<CRecipient> vecSend;
 
     // Turn the txout set into a CRecipient vector.
     for (size_t idx = 0; idx < tx.vout.size(); idx++) {
         const CTxOut &txOut = tx.vout[idx];
         CRecipient recipient = {txOut.scriptPubKey, txOut.nValue,
                                 setSubtractFeeFromOutputs.count(idx) == 1};
         vecSend.push_back(recipient);
     }
 
     CCoinControl coinControl;
     coinControl.destChange = destChange;
     coinControl.fAllowOtherInputs = true;
     coinControl.fAllowWatchOnly = includeWatching;
     coinControl.fOverrideFeeRate = overrideEstimatedFeeRate;
     coinControl.nFeeRate = specificFeeRate;
 
     for (const CTxIn &txin : tx.vin) {
         coinControl.Select(txin.prevout);
     }
 
     CReserveKey reservekey(this);
     CWalletTx wtx;
     if (!CreateTransaction(vecSend, wtx, reservekey, nFeeRet, nChangePosInOut,
                            strFailReason, &coinControl, false)) {
         return false;
     }
 
     if (nChangePosInOut != -1) {
         tx.vout.insert(tx.vout.begin() + nChangePosInOut,
                        wtx.tx->vout[nChangePosInOut]);
     }
 
     // Copy output values from the new transaction; they may have had the fee
     // subtracted from them.
     for (size_t idx = 0; idx < tx.vout.size(); idx++) {
         tx.vout[idx].nValue = wtx.tx->vout[idx].nValue;
     }
 
     // Add new txins (keeping original txin scriptSig/order)
     for (const CTxIn &txin : wtx.tx->vin) {
         if (!coinControl.IsSelected(txin.prevout)) {
             tx.vin.push_back(txin);
 
             if (lockUnspents) {
                 LOCK2(cs_main, cs_wallet);
                 LockCoin(txin.prevout);
             }
         }
     }
 
     // Optionally keep the change output key.
     if (keepReserveKey) {
         reservekey.KeepKey();
     }
 
     return true;
 }
 
 bool CWallet::CreateTransaction(const std::vector<CRecipient> &vecSend,
                                 CWalletTx &wtxNew, CReserveKey &reservekey,
                                 Amount &nFeeRet, int &nChangePosInOut,
                                 std::string &strFailReason,
                                 const CCoinControl *coinControl, bool sign) {
     Amount nValue(0);
     int nChangePosRequest = nChangePosInOut;
     unsigned int nSubtractFeeFromAmount = 0;
     for (const auto &recipient : vecSend) {
         if (nValue < Amount(0) || recipient.nAmount < Amount(0)) {
             strFailReason = _("Transaction amounts must not be negative");
             return false;
         }
 
         nValue += recipient.nAmount;
 
         if (recipient.fSubtractFeeFromAmount) {
             nSubtractFeeFromAmount++;
         }
     }
 
     if (vecSend.empty()) {
         strFailReason = _("Transaction must have at least one recipient");
         return false;
     }
 
     wtxNew.fTimeReceivedIsTxTime = true;
     wtxNew.BindWallet(this);
     CMutableTransaction txNew;
 
     // Discourage fee sniping.
     //
     // For a large miner the value of the transactions in the best block and the
     // mempool can exceed the cost of deliberately attempting to mine two blocks
     // to orphan the current best block. By setting nLockTime such that only the
     // next block can include the transaction, we discourage this practice as
     // the height restricted and limited blocksize gives miners considering fee
     // sniping fewer options for pulling off this attack.
     //
     // A simple way to think about this is from the wallet's point of view we
     // always want the blockchain to move forward. By setting nLockTime this way
     // we're basically making the statement that we only want this transaction
     // to appear in the next block; we don't want to potentially encourage
     // reorgs by allowing transactions to appear at lower heights than the next
     // block in forks of the best chain.
     //
     // Of course, the subsidy is high enough, and transaction volume low enough,
     // that fee sniping isn't a problem yet, but by implementing a fix now we
     // ensure code won't be written that makes assumptions about nLockTime that
     // preclude a fix later.
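     //
     // For example (hypothetical height): with the chain tip at height
     // 500,000, the line below sets nLockTime to 500,000, so under the
     // nLockTime rule the transaction can only be included at height 500,001
     // or later, i.e. in the very next block at the earliest.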
     txNew.nLockTime = chainActive.Height();
 
     // Secondly, occasionally pick an nLockTime even further back, so
     // that transactions that are delayed after signing for whatever reason,
     // e.g. high-latency mix networks and some CoinJoin implementations, have
     // better privacy.
     if (GetRandInt(10) == 0) {
         txNew.nLockTime = std::max(0, (int)txNew.nLockTime - GetRandInt(100));
     }
 
     assert(txNew.nLockTime <= (unsigned int)chainActive.Height());
     assert(txNew.nLockTime < LOCKTIME_THRESHOLD);
 
     {
         std::set<std::pair<const CWalletTx *, unsigned int>> setCoins;
         LOCK2(cs_main, cs_wallet);
 
         std::vector<COutput> vAvailableCoins;
         AvailableCoins(vAvailableCoins, true, coinControl);
 
         nFeeRet = Amount(0);
         // Start with no fee and loop until there is enough fee.
         while (true) {
             nChangePosInOut = nChangePosRequest;
             txNew.vin.clear();
             txNew.vout.clear();
             wtxNew.fFromMe = true;
             bool fFirst = true;
 
             Amount nValueToSelect = nValue;
             if (nSubtractFeeFromAmount == 0) {
                 nValueToSelect += nFeeRet;
             }
 
             double dPriority = 0;
             // vouts to the payees
             for (const auto &recipient : vecSend) {
                 CTxOut txout(recipient.nAmount, recipient.scriptPubKey);
 
                 if (recipient.fSubtractFeeFromAmount) {
                     // Subtract fee equally from each selected recipient.
                     txout.nValue -= nFeeRet / int(nSubtractFeeFromAmount);
 
                     // First receiver pays the remainder not divisible by output
                     // count.
                     if (fFirst) {
                         fFirst = false;
                         txout.nValue -= nFeeRet % int(nSubtractFeeFromAmount);
                     }
                 }
 
                 if (txout.IsDust(dustRelayFee)) {
                     if (recipient.fSubtractFeeFromAmount &&
                         nFeeRet > Amount(0)) {
                         if (txout.nValue < Amount(0)) {
                             strFailReason = _("The transaction amount is "
                                               "too small to pay the fee");
                         } else {
                             strFailReason =
                                 _("The transaction amount is too small to "
                                   "send after the fee has been deducted");
                         }
                     } else {
                         strFailReason = _("Transaction amount too small");
                     }
 
                     return false;
                 }
 
                 txNew.vout.push_back(txout);
             }
 
             // Choose coins to use.
             Amount nValueIn(0);
             setCoins.clear();
             if (!SelectCoins(vAvailableCoins, nValueToSelect, setCoins,
                              nValueIn, coinControl)) {
                 strFailReason = _("Insufficient funds");
                 return false;
             }
 
             for (const auto &pcoin : setCoins) {
                 Amount nCredit = pcoin.first->tx->vout[pcoin.second].nValue;
                 // The coin age after the next block (depth+1) is used instead
                 // of the current, reflecting an assumption the user would
                 // accept a bit more delay for a chance at a free transaction.
                 // But unconfirmed (mempool) inputs might still be unconfirmed
                 // when the next block arrives, so their age stays 0.
                 int age = pcoin.first->GetDepthInMainChain();
                 assert(age >= 0);
                 if (age != 0) age += 1;
                 dPriority += (double)nCredit.GetSatoshis() * age;
             }
 
             const Amount nChange = nValueIn - nValueToSelect;
             if (nChange > Amount(0)) {
                 // Fill a vout to ourself.
                 // TODO: pass in scriptChange instead of reservekey so change
                 // transaction isn't always pay-to-bitcoin-address.
                 CScript scriptChange;
 
                 // Coin control: send change to custom address.
                 if (coinControl &&
                     !boost::get<CNoDestination>(&coinControl->destChange)) {
                     scriptChange =
                         GetScriptForDestination(coinControl->destChange);
 
                     // No coin control: send change to newly generated address.
                 } else {
                     // Note: We use a new key here to keep it from being obvious
                     // which side is the change. The drawback is that by not
                     // reusing a previous key, the change may be lost if a
                     // backup is restored, if the backup doesn't have the new
                     // private key for the change. If we reused the old key, it
                     // would be possible to add code to look for and rediscover
                     // unknown transactions that were written with keys of ours
                     // to recover post-backup change.
 
                     // Reserve a new key pair from key pool.
                     CPubKey vchPubKey;
                     bool ret = reservekey.GetReservedKey(vchPubKey, true);
                     if (!ret) {
                         strFailReason = _("Keypool ran out, please call "
                                           "keypoolrefill first");
                         return false;
                     }
 
                     scriptChange = GetScriptForDestination(vchPubKey.GetID());
                 }
 
                 CTxOut newTxOut(nChange, scriptChange);
 
                 // We do not move dust-change to fees, because the sender would
                 // end up paying more than requested. This would be against the
                 // purpose of the all-inclusive feature. So instead we raise the
                 // change and deduct from the recipient.
                 if (nSubtractFeeFromAmount > 0 &&
                     newTxOut.IsDust(dustRelayFee)) {
                     Amount nDust = newTxOut.GetDustThreshold(dustRelayFee) -
                                    newTxOut.nValue;
                     // Raise change until no more dust.
                     newTxOut.nValue += nDust;
                     // Subtract from first recipient.
                     for (unsigned int i = 0; i < vecSend.size(); i++) {
                         if (vecSend[i].fSubtractFeeFromAmount) {
                             txNew.vout[i].nValue -= nDust;
                             if (txNew.vout[i].IsDust(dustRelayFee)) {
                                 strFailReason =
                                     _("The transaction amount is too small "
                                       "to send after the fee has been "
                                       "deducted");
                                 return false;
                             }
 
                             break;
                         }
                     }
                 }
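 
                 // For example (hypothetical amounts): if the change computed
                 // above were 200 satoshis while the dust threshold at the
                 // configured dustRelayFee worked out to 546 satoshis, nDust
                 // would be 346; the change output is raised to 546 satoshis
                 // and 346 satoshis are deducted from the first subtract-fee
                 // recipient, failing if that recipient's own output would
                 // itself become dust as a result.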
 
                 // Never create dust outputs; if we would, just add the dust to
                 // the fee.
                 if (newTxOut.IsDust(dustRelayFee)) {
                     nChangePosInOut = -1;
                     nFeeRet += nChange;
                     reservekey.ReturnKey();
                 } else {
                     if (nChangePosInOut == -1) {
                         // Insert change txn at random position:
                         nChangePosInOut = GetRandInt(txNew.vout.size() + 1);
                     } else if ((unsigned int)nChangePosInOut >
                                txNew.vout.size()) {
                         strFailReason = _("Change index out of range");
                         return false;
                     }
 
                     std::vector<CTxOut>::iterator position =
                         txNew.vout.begin() + nChangePosInOut;
                     txNew.vout.insert(position, newTxOut);
                 }
             } else {
                 reservekey.ReturnKey();
             }
 
             // Fill vin
             //
             // Note how the sequence number is set to non-maxint so that the
             // nLockTime set above actually works.
             for (const auto &coin : setCoins) {
                 txNew.vin.push_back(
                     CTxIn(coin.first->GetId(), coin.second, CScript(),
                           std::numeric_limits<unsigned int>::max() - 1));
             }
 
             // Fill in dummy signatures for fee calculation.
             if (!DummySignTx(txNew, setCoins)) {
                 strFailReason = _("Signing transaction failed");
                 return false;
             }
 
             unsigned int nBytes = GetTransactionSize(txNew);
 
             CTransaction txNewConst(txNew);
             dPriority = txNewConst.ComputePriority(dPriority, nBytes);
 
             // Remove scriptSigs to eliminate the fee calculation dummy
             // signatures.
             for (auto &vin : txNew.vin) {
                 vin.scriptSig = CScript();
             }
 
             // Allow the default confirmation target to be overridden via the
             // CoinControl instance.
             int currentConfirmationTarget = nTxConfirmTarget;
             if (coinControl && coinControl->nConfirmTarget > 0) {
                 currentConfirmationTarget = coinControl->nConfirmTarget;
             }
 
             // Can we complete this as a free transaction?
             if (fSendFreeTransactions &&
                 nBytes <= MAX_FREE_TRANSACTION_CREATE_SIZE) {
                 // Not enough fee: enough priority?
                 double dPriorityNeeded =
                     mempool.estimateSmartPriority(currentConfirmationTarget);
                 // Require at least hard-coded AllowFree.
                 if (dPriority >= dPriorityNeeded && AllowFree(dPriority)) {
                     break;
                 }
             }
 
             Amount nFeeNeeded =
                 GetMinimumFee(nBytes, currentConfirmationTarget, mempool);
             if (coinControl && nFeeNeeded > Amount(0) &&
                 coinControl->nMinimumTotalFee > nFeeNeeded) {
                 nFeeNeeded = coinControl->nMinimumTotalFee;
             }
 
             if (coinControl && coinControl->fOverrideFeeRate) {
                 nFeeNeeded = coinControl->nFeeRate.GetFee(nBytes);
             }
 
             // If we made it here and we aren't even able to meet the relay fee
             // on the next pass, give up because we must be at the maximum
             // allowed fee.
             if (nFeeNeeded < ::minRelayTxFee.GetFee(nBytes)) {
                 strFailReason = _("Transaction too large for fee policy");
                 return false;
             }
 
             if (nFeeRet >= nFeeNeeded) {
                 // Reduce fee to only the needed amount if we have change output
                 // to increase. This prevents potential overpayment in fees if
                 // the coins selected to meet nFeeNeeded result in a transaction
                 // that requires less fee than the prior iteration.
                 // TODO: The case where nSubtractFeeFromAmount > 0 remains to be
                 // addressed because it requires returning the fee to the payees
                 // and not the change output.
                 // TODO: The case where there is no change output remains to be
                 // addressed so we avoid creating too small an output.
                 if (nFeeRet > nFeeNeeded && nChangePosInOut != -1 &&
                     nSubtractFeeFromAmount == 0) {
                     Amount extraFeePaid = nFeeRet - nFeeNeeded;
                     std::vector<CTxOut>::iterator change_position =
                         txNew.vout.begin() + nChangePosInOut;
                     change_position->nValue += extraFeePaid;
                     nFeeRet -= extraFeePaid;
                 }
 
                 // Done, enough fee included.
                 break;
             }
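 
             // For example (hypothetical amounts): if this pass computed
             // nFeeNeeded = 1000 satoshis but an earlier iteration already set
             // nFeeRet = 1200, the 200-satoshi surplus is added back onto the
             // change output rather than being overpaid as fee.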
 
             // Try to reduce change to include necessary fee.
             if (nChangePosInOut != -1 && nSubtractFeeFromAmount == 0) {
                 Amount additionalFeeNeeded = nFeeNeeded - nFeeRet;
                 std::vector<CTxOut>::iterator change_position =
                     txNew.vout.begin() + nChangePosInOut;
                 // Only reduce change if remaining amount is still a large
                 // enough output.
                 if (change_position->nValue >=
                     MIN_FINAL_CHANGE + additionalFeeNeeded) {
                     change_position->nValue -= additionalFeeNeeded;
                     nFeeRet += additionalFeeNeeded;
                     // Done, able to increase fee from change.
                     break;
                 }
             }
 
             // Include more fee and try again.
             nFeeRet = nFeeNeeded;
             continue;
         }
 
         if (sign) {
             SigHashType sigHashType = SigHashType().withForkId(true);
 
             CTransaction txNewConst(txNew);
             int nIn = 0;
             for (const auto &coin : setCoins) {
                 const CScript &scriptPubKey =
                     coin.first->tx->vout[coin.second].scriptPubKey;
                 SignatureData sigdata;
 
                 if (!ProduceSignature(
                         TransactionSignatureCreator(
                             this, &txNewConst, nIn,
                             coin.first->tx->vout[coin.second].nValue,
                             sigHashType),
                         scriptPubKey, sigdata)) {
                     strFailReason = _("Signing transaction failed");
                     return false;
                 } else {
                     UpdateTransaction(txNew, nIn, sigdata);
                 }
 
                 nIn++;
             }
         }
 
         // Embed the constructed transaction data in wtxNew.
         wtxNew.SetTx(MakeTransactionRef(std::move(txNew)));
 
         // Limit size.
         if (GetTransactionSize(wtxNew) >= MAX_STANDARD_TX_SIZE) {
             strFailReason = _("Transaction too large");
             return false;
         }
     }
 
     if (GetBoolArg("-walletrejectlongchains",
                    DEFAULT_WALLET_REJECT_LONG_CHAINS)) {
         // Lastly, ensure this tx will pass the mempool's chain limits.
         LockPoints lp;
         CTxMemPoolEntry entry(wtxNew.tx, Amount(0), 0, 0, 0, Amount(0), false,
                               0, lp);
         CTxMemPool::setEntries setAncestors;
         size_t nLimitAncestors =
             GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
         size_t nLimitAncestorSize =
             GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000;
         size_t nLimitDescendants =
             GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
         size_t nLimitDescendantSize =
             GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) *
             1000;
         std::string errString;
         if (!mempool.CalculateMemPoolAncestors(
                 entry, setAncestors, nLimitAncestors, nLimitAncestorSize,
                 nLimitDescendants, nLimitDescendantSize, errString)) {
             strFailReason = _("Transaction has too long of a mempool chain");
             return false;
         }
     }
 
     return true;
 }
 
 /**
  * Call after CreateTransaction unless you want to abort
  */
 bool CWallet::CommitTransaction(CWalletTx &wtxNew, CReserveKey &reservekey,
                                 CConnman *connman, CValidationState &state) {
     LOCK2(cs_main, cs_wallet);
     LogPrintf("CommitTransaction:\n%s", wtxNew.tx->ToString());
 
     // Take key pair from key pool so it won't be used again.
     reservekey.KeepKey();
 
     // Add tx to wallet, because if it has change it's also ours, otherwise just
     // for transaction history.
     AddToWallet(wtxNew);
 
     // Notify that old coins are spent.
     for (const CTxIn &txin : wtxNew.tx->vin) {
         CWalletTx &coin = mapWallet[txin.prevout.hash];
         coin.BindWallet(this);
         NotifyTransactionChanged(this, coin.GetId(), CT_UPDATED);
     }
 
     // Track how many getdata requests our transaction gets.
     mapRequestCount[wtxNew.GetId()] = 0;
 
     if (fBroadcastTransactions) {
         // Broadcast
         if (!wtxNew.AcceptToMemoryPool(maxTxFee, state)) {
             LogPrintf("CommitTransaction(): Transaction cannot be "
                       "broadcast immediately, %s\n",
                       state.GetRejectReason());
             // TODO: if we expect the failure to be long term or permanent,
             // instead delete wtx from the wallet and return failure.
         } else {
             wtxNew.RelayWalletTransaction(connman);
         }
     }
 
     return true;
 }
 
 void CWallet::ListAccountCreditDebit(const std::string &strAccount,
                                      std::list<CAccountingEntry> &entries) {
     CWalletDB walletdb(strWalletFile);
     return walletdb.ListAccountCreditDebit(strAccount, entries);
 }
 
 bool CWallet::AddAccountingEntry(const CAccountingEntry &acentry) {
     CWalletDB walletdb(strWalletFile);
 
     return AddAccountingEntry(acentry, &walletdb);
 }
 
 bool CWallet::AddAccountingEntry(const CAccountingEntry &acentry,
                                  CWalletDB *pwalletdb) {
     if (!pwalletdb->WriteAccountingEntry_Backend(acentry)) {
         return false;
     }
 
     laccentries.push_back(acentry);
     CAccountingEntry &entry = laccentries.back();
     wtxOrdered.insert(std::make_pair(entry.nOrderPos, TxPair(nullptr, &entry)));
 
     return true;
 }
 
 Amount CWallet::GetRequiredFee(unsigned int nTxBytes) {
     return std::max(minTxFee.GetFee(nTxBytes),
                     ::minRelayTxFee.GetFee(nTxBytes));
 }
 
 Amount CWallet::GetMinimumFee(unsigned int nTxBytes,
                               unsigned int nConfirmTarget,
                               const CTxMemPool &pool) {
     // payTxFee is the user-set global for desired feerate.
     return GetMinimumFee(nTxBytes, nConfirmTarget, pool,
                          payTxFee.GetFee(nTxBytes));
 }
 
 Amount CWallet::GetMinimumFee(unsigned int nTxBytes,
                               unsigned int nConfirmTarget,
                               const CTxMemPool &pool, Amount targetFee) {
     Amount nFeeNeeded = targetFee;
     // User didn't set: use -txconfirmtarget to estimate...
     if (nFeeNeeded == Amount(0)) {
         int estimateFoundTarget = nConfirmTarget;
         nFeeNeeded = pool.estimateSmartFee(nConfirmTarget, &estimateFoundTarget)
                          .GetFee(nTxBytes);
         // ... unless we don't have enough mempool data for estimatefee, then
         // use fallbackFee.
         if (nFeeNeeded == Amount(0)) {
             nFeeNeeded = fallbackFee.GetFee(nTxBytes);
         }
     }
 
     // Prevent user from paying a fee below minRelayTxFee or minTxFee.
     nFeeNeeded = std::max(nFeeNeeded, GetRequiredFee(nTxBytes));
 
     // But always obey the maximum.
     if (nFeeNeeded > maxTxFee) {
         nFeeNeeded = maxTxFee;
     }
 
     return nFeeNeeded;
 }
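 
 // Worked example of the fee pipeline above (hypothetical rates): for a
 // 250-byte transaction with no user-set payTxFee, an estimateSmartFee result
 // of 2000 satoshis/kB gives nFeeNeeded = 500 satoshis; with minRelayTxFee at
 // 1000 satoshis/kB the GetRequiredFee floor is 250 satoshis, so 500 is kept,
 // and it would only be clamped if it exceeded maxTxFee. If the estimator has
 // no data, fallbackFee is used instead.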
 
 DBErrors CWallet::LoadWallet(bool &fFirstRunRet) {
     if (!fFileBacked) {
         return DB_LOAD_OK;
     }
 
     fFirstRunRet = false;
     DBErrors nLoadWalletRet = CWalletDB(strWalletFile, "cr+").LoadWallet(this);
     if (nLoadWalletRet == DB_NEED_REWRITE) {
         if (CDB::Rewrite(strWalletFile, "\x04pool")) {
             LOCK(cs_wallet);
             setKeyPool.clear();
             // Note: we can't top-up the keypool here, because the wallet is
             // locked. The user will be prompted to unlock the wallet the next
             // time an operation requires a new key.
         }
     }
 
     if (nLoadWalletRet != DB_LOAD_OK) {
         return nLoadWalletRet;
     }
 
     fFirstRunRet = !vchDefaultKey.IsValid();
 
     uiInterface.LoadWallet(this);
 
     return DB_LOAD_OK;
 }
 
 DBErrors CWallet::ZapSelectTx(std::vector<uint256> &vHashIn,
                               std::vector<uint256> &vHashOut) {
     if (!fFileBacked) {
         return DB_LOAD_OK;
     }
 
     AssertLockHeld(cs_wallet); // mapWallet
     vchDefaultKey = CPubKey();
     DBErrors nZapSelectTxRet =
         CWalletDB(strWalletFile, "cr+").ZapSelectTx(vHashIn, vHashOut);
     for (uint256 hash : vHashOut) {
         mapWallet.erase(hash);
     }
 
     if (nZapSelectTxRet == DB_NEED_REWRITE) {
         if (CDB::Rewrite(strWalletFile, "\x04pool")) {
             setKeyPool.clear();
             // Note: we can't top-up the keypool here, because the wallet is
             // locked. The user will be prompted to unlock the wallet the next
             // time an operation requires a new key.
         }
     }
 
     if (nZapSelectTxRet != DB_LOAD_OK) {
         return nZapSelectTxRet;
     }
 
     MarkDirty();
 
     return DB_LOAD_OK;
 }
 
 DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx> &vWtx) {
     if (!fFileBacked) {
         return DB_LOAD_OK;
     }
 
     vchDefaultKey = CPubKey();
     DBErrors nZapWalletTxRet =
         CWalletDB(strWalletFile, "cr+").ZapWalletTx(vWtx);
     if (nZapWalletTxRet == DB_NEED_REWRITE) {
         if (CDB::Rewrite(strWalletFile, "\x04pool")) {
             LOCK(cs_wallet);
             setKeyPool.clear();
             // Note: we can't top-up the keypool here, because the wallet is
             // locked. The user will be prompted to unlock the wallet the next
             // time an operation requires a new key.
         }
     }
 
     if (nZapWalletTxRet != DB_LOAD_OK) {
         return nZapWalletTxRet;
     }
 
     return DB_LOAD_OK;
 }
 
 bool CWallet::SetAddressBook(const CTxDestination &address,
                              const std::string &strName,
                              const std::string &strPurpose) {
     bool fUpdated = false;
     {
         // mapAddressBook
         LOCK(cs_wallet);
         std::map<CTxDestination, CAddressBookData>::iterator mi =
             mapAddressBook.find(address);
         fUpdated = mi != mapAddressBook.end();
         mapAddressBook[address].name = strName;
         // Update purpose only if requested.
         if (!strPurpose.empty()) {
             mapAddressBook[address].purpose = strPurpose;
         }
     }
 
     NotifyAddressBookChanged(this, address, strName,
                              ::IsMine(*this, address) != ISMINE_NO, strPurpose,
                              (fUpdated ? CT_UPDATED : CT_NEW));
     if (!fFileBacked) {
         return false;
     }
 
     if (!strPurpose.empty() &&
         !CWalletDB(strWalletFile).WritePurpose(address, strPurpose)) {
         return false;
     }
 
     return CWalletDB(strWalletFile).WriteName(address, strName);
 }
 
 bool CWallet::DelAddressBook(const CTxDestination &address) {
     {
         // mapAddressBook
         LOCK(cs_wallet);
 
         if (fFileBacked) {
             // Delete destdata tuples associated with address.
             for (const std::pair<std::string, std::string> &item :
                  mapAddressBook[address].destdata) {
                 CWalletDB(strWalletFile).EraseDestData(address, item.first);
             }
         }
         mapAddressBook.erase(address);
     }
 
     NotifyAddressBookChanged(this, address, "",
                              ::IsMine(*this, address) != ISMINE_NO, "",
                              CT_DELETED);
 
     if (!fFileBacked) {
         return false;
     }
 
     CWalletDB(strWalletFile).ErasePurpose(address);
     return CWalletDB(strWalletFile).EraseName(address);
 }
 
 bool CWallet::SetDefaultKey(const CPubKey &vchPubKey) {
     if (fFileBacked && !CWalletDB(strWalletFile).WriteDefaultKey(vchPubKey)) {
         return false;
     }
 
     vchDefaultKey = vchPubKey;
     return true;
 }
 
 /**
  * Mark old keypool keys as used, and generate all new keys.
  */
 bool CWallet::NewKeyPool() {
     LOCK(cs_wallet);
     CWalletDB walletdb(strWalletFile);
     for (int64_t nIndex : setKeyPool) {
         walletdb.ErasePool(nIndex);
     }
     setKeyPool.clear();
 
     if (!TopUpKeyPool()) {
         return false;
     }
 
     LogPrintf("CWallet::NewKeyPool rewrote keypool\n");
     return true;
 }
 
 size_t CWallet::KeypoolCountExternalKeys() {
     // setKeyPool
     AssertLockHeld(cs_wallet);
 
     // Return setKeyPool's size immediately if HD or HD_SPLIT is disabled or
     // not supported.
     if (!IsHDEnabled() || !CanSupportFeature(FEATURE_HD_SPLIT)) {
         return setKeyPool.size();
     }
 
     CWalletDB walletdb(strWalletFile);
 
     // Count the number of external keys.
     size_t amountE = 0;
     for (const int64_t &id : setKeyPool) {
         CKeyPool tmpKeypool;
         if (!walletdb.ReadPool(id, tmpKeypool)) {
             throw std::runtime_error(std::string(__func__) + ": read failed");
         }
         amountE += !tmpKeypool.fInternal;
     }
 
     return amountE;
 }
 
 bool CWallet::TopUpKeyPool(unsigned int kpSize) {
     LOCK(cs_wallet);
 
     if (IsLocked()) {
         return false;
     }
 
     // Top up key pool.
     unsigned int nTargetSize;
     if (kpSize > 0) {
         nTargetSize = kpSize;
     } else {
         nTargetSize =
             std::max(GetArg("-keypool", DEFAULT_KEYPOOL_SIZE), int64_t(0));
     }
 
     // Count the number of available keys (internal and external) and make
     // sure the keypool of external and internal keys fits the user-selected
     // target (-keypool).
     int64_t amountExternal = KeypoolCountExternalKeys();
     int64_t amountInternal = setKeyPool.size() - amountExternal;
     int64_t missingExternal =
         std::max(std::max(int64_t(nTargetSize), int64_t(1)) - amountExternal,
                  int64_t(0));
     int64_t missingInternal =
         std::max(std::max(int64_t(nTargetSize), int64_t(1)) - amountInternal,
                  int64_t(0));
 
     if (!IsHDEnabled() || !CanSupportFeature(FEATURE_HD_SPLIT)) {
         // don't create extra internal keys
         missingInternal = 0;
     }
     bool internal = false;
     CWalletDB walletdb(strWalletFile);
     for (int64_t i = missingInternal + missingExternal; i--;) {
         int64_t nEnd = 1;
         if (i < missingInternal) {
             internal = true;
         }
         if (!setKeyPool.empty()) {
             nEnd = *(--setKeyPool.end()) + 1;
         }
         if (!walletdb.WritePool(nEnd,
                                 CKeyPool(GenerateNewKey(internal), internal))) {
             throw std::runtime_error(std::string(__func__) +
                                      ": writing generated key failed");
         }
         setKeyPool.insert(nEnd);
         LogPrintf("keypool added key %d, size=%u, internal=%d\n", nEnd,
                   setKeyPool.size(), internal);
     }
 
     return true;
 }
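 
 // Worked example of the top-up arithmetic above (hypothetical sizes): with
 // -keypool=100, 40 external and 10 internal keys remaining, missingExternal
 // is 60 and missingInternal is 90, so 150 keys are written in total. Because
 // i counts down and internal only flips to true once i < missingInternal, the
 // 60 external keys are generated first, followed by the 90 internal ones
 // (missingInternal is forced to 0 instead when HD split is unavailable).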
 
 void CWallet::ReserveKeyFromKeyPool(int64_t &nIndex, CKeyPool &keypool,
                                     bool internal) {
     nIndex = -1;
     keypool.vchPubKey = CPubKey();
 
     LOCK(cs_wallet);
 
     if (!IsLocked()) {
         TopUpKeyPool();
     }
 
     // Get the oldest key.
     if (setKeyPool.empty()) {
         return;
     }
 
     CWalletDB walletdb(strWalletFile);
 
     // try to find a key that matches the internal/external filter
     for (const int64_t &id : setKeyPool) {
         CKeyPool tmpKeypool;
         if (!walletdb.ReadPool(id, tmpKeypool)) {
             throw std::runtime_error(std::string(__func__) + ": read failed");
         }
         if (!HaveKey(tmpKeypool.vchPubKey.GetID())) {
             throw std::runtime_error(std::string(__func__) +
                                      ": unknown key in key pool");
         }
         if (!IsHDEnabled() || !CanSupportFeature(FEATURE_HD_SPLIT) ||
             tmpKeypool.fInternal == internal) {
             nIndex = id;
             keypool = tmpKeypool;
             setKeyPool.erase(id);
             assert(keypool.vchPubKey.IsValid());
             LogPrintf("keypool reserve %d\n", nIndex);
             return;
         }
     }
 }
 
 void CWallet::KeepKey(int64_t nIndex) {
     // Remove from key pool.
     if (fFileBacked) {
         CWalletDB walletdb(strWalletFile);
         walletdb.ErasePool(nIndex);
     }
 
     LogPrintf("keypool keep %d\n", nIndex);
 }
 
 void CWallet::ReturnKey(int64_t nIndex) {
     // Return to key pool.
     {
         LOCK(cs_wallet);
         setKeyPool.insert(nIndex);
     }
 
     LogPrintf("keypool return %d\n", nIndex);
 }
 
 bool CWallet::GetKeyFromPool(CPubKey &result, bool internal) {
     int64_t nIndex = 0;
     CKeyPool keypool;
 
     LOCK(cs_wallet);
     ReserveKeyFromKeyPool(nIndex, keypool, internal);
     if (nIndex == -1) {
         if (IsLocked()) {
             return false;
         }
         result = GenerateNewKey(internal);
         return true;
     }
 
     KeepKey(nIndex);
     result = keypool.vchPubKey;
 
     return true;
 }
 
 int64_t CWallet::GetOldestKeyPoolTime() {
     LOCK(cs_wallet);
 
     // If the keypool is empty, return <NOW>
     if (setKeyPool.empty()) {
         return GetTime();
     }
 
     CKeyPool keypool;
     CWalletDB walletdb(strWalletFile);
 
     if (IsHDEnabled() && CanSupportFeature(FEATURE_HD_SPLIT)) {
         // If HD & HD chain split is enabled, return max(oldest-internal-key,
         // oldest-external-key).
         int64_t now = GetTime();
         int64_t oldest_external = now, oldest_internal = now;
 
         for (const int64_t &id : setKeyPool) {
             if (!walletdb.ReadPool(id, keypool)) {
                 throw std::runtime_error(std::string(__func__) +
                                          ": read failed");
             }
             if (keypool.fInternal && keypool.nTime < oldest_internal) {
                 oldest_internal = keypool.nTime;
             } else if (!keypool.fInternal && keypool.nTime < oldest_external) {
                 oldest_external = keypool.nTime;
             }
             if (oldest_internal != now && oldest_external != now) {
                 break;
             }
         }
         return std::max(oldest_internal, oldest_external);
     }
 
     // Load oldest key from keypool, get time and return.
     int64_t nIndex = *(setKeyPool.begin());
     if (!walletdb.ReadPool(nIndex, keypool)) {
         throw std::runtime_error(std::string(__func__) +
                                  ": read oldest key in keypool failed");
     }
 
     assert(keypool.vchPubKey.IsValid());
     return keypool.nTime;
 }
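 
 // Note on the HD-split branch above: taking max(oldest_internal,
 // oldest_external) means the reported age is that of the younger of the two
 // sub-pools. For example, if the oldest external key was created at t=1000
 // and the oldest internal key at t=2000, the function returns 2000, since the
 // pool is only as old as both halves can guarantee.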
 
 std::map<CTxDestination, Amount> CWallet::GetAddressBalances() {
     std::map<CTxDestination, Amount> balances;
 
     LOCK(cs_wallet);
     for (std::pair<const uint256, CWalletTx> &walletEntry : mapWallet) {
         CWalletTx *pcoin = &walletEntry.second;
 
         if (!pcoin->IsTrusted()) {
             continue;
         }
 
         if (pcoin->IsCoinBase() && pcoin->GetBlocksToMaturity() > 0) {
             continue;
         }
 
         int nDepth = pcoin->GetDepthInMainChain();
         if (nDepth < (pcoin->IsFromMe(ISMINE_ALL) ? 0 : 1)) {
             continue;
         }
 
         for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++) {
             CTxDestination addr;
             if (!IsMine(pcoin->tx->vout[i])) {
                 continue;
             }
 
             if (!ExtractDestination(pcoin->tx->vout[i].scriptPubKey, addr)) {
                 continue;
             }
 
             Amount n = IsSpent(walletEntry.first, i)
                            ? Amount(0)
                            : pcoin->tx->vout[i].nValue;
 
             if (!balances.count(addr)) {
                 balances[addr] = Amount(0);
             }
             balances[addr] += n;
         }
     }
 
     return balances;
 }
 
 std::set<std::set<CTxDestination>> CWallet::GetAddressGroupings() {
     // mapWallet
     AssertLockHeld(cs_wallet);
     std::set<std::set<CTxDestination>> groupings;
     std::set<CTxDestination> grouping;
 
     for (std::pair<const uint256, CWalletTx> &walletEntry : mapWallet) {
         CWalletTx *pcoin = &walletEntry.second;
 
         if (pcoin->tx->vin.size() > 0) {
             bool any_mine = false;
             // Group all input addresses with each other.
             for (CTxIn txin : pcoin->tx->vin) {
                 CTxDestination address;
                 // If this input isn't mine, ignore it.
                 if (!IsMine(txin)) {
                     continue;
                 }
 
                 if (!ExtractDestination(mapWallet[txin.prevout.hash]
                                             .tx->vout[txin.prevout.n]
                                             .scriptPubKey,
                                         address)) {
                     continue;
                 }
 
                 grouping.insert(address);
                 any_mine = true;
             }
 
             // Group change with input addresses.
             if (any_mine) {
                 for (CTxOut txout : pcoin->tx->vout) {
                     if (IsChange(txout)) {
                         CTxDestination txoutAddr;
                         if (!ExtractDestination(txout.scriptPubKey,
                                                 txoutAddr)) {
                             continue;
                         }
 
                         grouping.insert(txoutAddr);
                     }
                 }
             }
 
             if (grouping.size() > 0) {
                 groupings.insert(grouping);
                 grouping.clear();
             }
         }
 
         // Group lone addrs by themselves.
         for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++)
             if (IsMine(pcoin->tx->vout[i])) {
                 CTxDestination address;
                 if (!ExtractDestination(pcoin->tx->vout[i].scriptPubKey,
                                         address)) {
                     continue;
                 }
 
                 grouping.insert(address);
                 groupings.insert(grouping);
                 grouping.clear();
             }
     }
 
     // A set of pointers to groups of addresses.
     std::set<std::set<CTxDestination> *> uniqueGroupings;
     // Map each address to the unique group containing it.
     std::map<CTxDestination, std::set<CTxDestination> *> setmap;
     for (std::set<CTxDestination> _grouping : groupings) {
         // Make a set of all the groups hit by this new group.
         std::set<std::set<CTxDestination> *> hits;
         std::map<CTxDestination, std::set<CTxDestination> *>::iterator it;
         for (CTxDestination address : _grouping) {
             if ((it = setmap.find(address)) != setmap.end())
                 hits.insert((*it).second);
         }
 
         // Merge all hit groups into a new single group and delete old groups.
         std::set<CTxDestination> *merged =
             new std::set<CTxDestination>(_grouping);
         for (std::set<CTxDestination> *hit : hits) {
             merged->insert(hit->begin(), hit->end());
             uniqueGroupings.erase(hit);
             delete hit;
         }
         uniqueGroupings.insert(merged);
 
         // Update setmap.
         for (CTxDestination element : *merged) {
             setmap[element] = merged;
         }
     }
 
     std::set<std::set<CTxDestination>> ret;
     for (std::set<CTxDestination> *uniqueGrouping : uniqueGroupings) {
         ret.insert(*uniqueGrouping);
         delete uniqueGrouping;
     }
 
     return ret;
 }
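 
 /*
  * Illustrative sketch, not part of the wallet sources: how the merge step in
  * GetAddressGroupings() unifies overlapping groupings. Addresses A-D are
  * hypothetical:
  *
  *   - tx1 spends inputs belonging to A and B and pays change to C, producing
  *     the grouping {A, B, C};
  *   - tx2 spends inputs belonging to B and D, producing the grouping {B, D}.
  *
  * When the second grouping is processed, setmap already maps B to {A, B, C},
  * so that set is a "hit": it is merged with {B, D} into a single group
  * {A, B, C, D}, the old set is deleted, and setmap is updated to point every
  * member at the merged set.
  */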
 
 Amount CWallet::GetAccountBalance(const std::string &strAccount, int nMinDepth,
                                   const isminefilter &filter) {
     CWalletDB walletdb(strWalletFile);
     return GetAccountBalance(walletdb, strAccount, nMinDepth, filter);
 }
 
 Amount CWallet::GetAccountBalance(CWalletDB &walletdb,
                                   const std::string &strAccount, int nMinDepth,
                                   const isminefilter &filter) {
     Amount nBalance(0);
 
     // Tally wallet transactions.
     for (std::map<uint256, CWalletTx>::iterator it = mapWallet.begin();
          it != mapWallet.end(); ++it) {
         const CWalletTx &wtx = (*it).second;
         if (!CheckFinalTx(wtx) || wtx.GetBlocksToMaturity() > 0 ||
             wtx.GetDepthInMainChain() < 0) {
             continue;
         }
 
         Amount nReceived, nSent, nFee;
         wtx.GetAccountAmounts(strAccount, nReceived, nSent, nFee, filter);
 
         if (nReceived != Amount(0) && wtx.GetDepthInMainChain() >= nMinDepth) {
             nBalance += nReceived;
         }
 
         nBalance -= nSent + nFee;
     }
 
     // Tally internal accounting entries.
     nBalance += walletdb.GetAccountCreditDebit(strAccount);
 
     return nBalance;
 }
 
 std::set<CTxDestination>
 CWallet::GetAccountAddresses(const std::string &strAccount) const {
     LOCK(cs_wallet);
     std::set<CTxDestination> result;
     for (const std::pair<CTxDestination, CAddressBookData> &item :
          mapAddressBook) {
         const CTxDestination &address = item.first;
         const std::string &strName = item.second.name;
         if (strName == strAccount) {
             result.insert(address);
         }
     }
 
     return result;
 }
 
 bool CReserveKey::GetReservedKey(CPubKey &pubkey, bool internal) {
     if (nIndex == -1) {
         CKeyPool keypool;
         pwallet->ReserveKeyFromKeyPool(nIndex, keypool, internal);
         if (nIndex != -1) {
             vchPubKey = keypool.vchPubKey;
         } else {
             return false;
         }
     }
 
     assert(vchPubKey.IsValid());
     pubkey = vchPubKey;
     return true;
 }
 
 void CReserveKey::KeepKey() {
     if (nIndex != -1) {
         pwallet->KeepKey(nIndex);
     }
 
     nIndex = -1;
     vchPubKey = CPubKey();
 }
 
 void CReserveKey::ReturnKey() {
     if (nIndex != -1) {
         pwallet->ReturnKey(nIndex);
     }
 
     nIndex = -1;
     vchPubKey = CPubKey();
 }
 
 void CWallet::GetAllReserveKeys(std::set<CKeyID> &setAddress) const {
     setAddress.clear();
 
     CWalletDB walletdb(strWalletFile);
 
     LOCK2(cs_main, cs_wallet);
     for (const int64_t &id : setKeyPool) {
         CKeyPool keypool;
         if (!walletdb.ReadPool(id, keypool)) {
             throw std::runtime_error(std::string(__func__) + ": read failed");
         }
 
         assert(keypool.vchPubKey.IsValid());
         CKeyID keyID = keypool.vchPubKey.GetID();
         if (!HaveKey(keyID)) {
             throw std::runtime_error(std::string(__func__) +
                                      ": unknown key in key pool");
         }
 
         setAddress.insert(keyID);
     }
 }
 
-void CWallet::UpdatedTransaction(const uint256 &hashTx) {
-    LOCK(cs_wallet);
-    // Only notify UI if this transaction is in this wallet.
-    std::map<uint256, CWalletTx>::const_iterator mi = mapWallet.find(hashTx);
-    if (mi != mapWallet.end()) {
-        NotifyTransactionChanged(this, hashTx, CT_UPDATED);
-    }
-}
-
 void CWallet::GetScriptForMining(std::shared_ptr<CReserveScript> &script) {
     std::shared_ptr<CReserveKey> rKey = std::make_shared<CReserveKey>(this);
     CPubKey pubkey;
     if (!rKey->GetReservedKey(pubkey)) {
         return;
     }
 
     script = rKey;
     script->reserveScript = CScript() << ToByteVector(pubkey) << OP_CHECKSIG;
 }
 
 void CWallet::LockCoin(const COutPoint &output) {
     // setLockedCoins
     AssertLockHeld(cs_wallet);
     setLockedCoins.insert(output);
 }
 
 void CWallet::UnlockCoin(const COutPoint &output) {
     // setLockedCoins
     AssertLockHeld(cs_wallet);
     setLockedCoins.erase(output);
 }
 
 void CWallet::UnlockAllCoins() {
     // setLockedCoins
     AssertLockHeld(cs_wallet);
     setLockedCoins.clear();
 }
 
 bool CWallet::IsLockedCoin(uint256 hash, unsigned int n) const {
     // setLockedCoins
     AssertLockHeld(cs_wallet);
     COutPoint outpt(hash, n);
 
     return setLockedCoins.count(outpt) > 0;
 }
 
 void CWallet::ListLockedCoins(std::vector<COutPoint> &vOutpts) {
     // setLockedCoins
     AssertLockHeld(cs_wallet);
     for (std::set<COutPoint>::iterator it = setLockedCoins.begin();
          it != setLockedCoins.end(); it++) {
         COutPoint outpt = (*it);
         vOutpts.push_back(outpt);
     }
 }
 
 /** @} */ // end of Actions
 
 class CAffectedKeysVisitor : public boost::static_visitor<void> {
 private:
     const CKeyStore &keystore;
     std::vector<CKeyID> &vKeys;
 
 public:
     CAffectedKeysVisitor(const CKeyStore &keystoreIn,
                          std::vector<CKeyID> &vKeysIn)
         : keystore(keystoreIn), vKeys(vKeysIn) {}
 
     void Process(const CScript &script) {
         txnouttype type;
         std::vector<CTxDestination> vDest;
         int nRequired;
         if (ExtractDestinations(script, type, vDest, nRequired)) {
             for (const CTxDestination &dest : vDest) {
                 boost::apply_visitor(*this, dest);
             }
         }
     }
 
     void operator()(const CKeyID &keyId) {
         if (keystore.HaveKey(keyId)) {
             vKeys.push_back(keyId);
         }
     }
 
     void operator()(const CScriptID &scriptId) {
         CScript script;
         if (keystore.GetCScript(scriptId, script)) {
             Process(script);
         }
     }
 
     void operator()(const CNoDestination &none) {}
 };
 
 void CWallet::GetKeyBirthTimes(
     std::map<CTxDestination, int64_t> &mapKeyBirth) const {
     // mapKeyMetadata
     AssertLockHeld(cs_wallet);
     mapKeyBirth.clear();
 
     // Get birth times for keys with metadata.
     for (const auto &entry : mapKeyMetadata) {
         if (entry.second.nCreateTime) {
             mapKeyBirth[entry.first] = entry.second.nCreateTime;
         }
     }
 
     // Map in which we'll infer heights of other keys. The tip can be
     // reorganized; use a 144-block safety margin.
     CBlockIndex *pindexMax =
         chainActive[std::max(0, chainActive.Height() - 144)];
     std::map<CKeyID, CBlockIndex *> mapKeyFirstBlock;
     std::set<CKeyID> setKeys;
     GetKeys(setKeys);
     for (const CKeyID &keyid : setKeys) {
         if (mapKeyBirth.count(keyid) == 0) {
             mapKeyFirstBlock[keyid] = pindexMax;
         }
     }
     setKeys.clear();
 
     // If there are no such keys, we're done.
     if (mapKeyFirstBlock.empty()) {
         return;
     }
 
     // Find first block that affects those keys, if there are any left.
     std::vector<CKeyID> vAffected;
     for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin();
          it != mapWallet.end(); it++) {
         // Iterate over all wallet transactions...
         const CWalletTx &wtx = (*it).second;
         BlockMap::const_iterator blit = mapBlockIndex.find(wtx.hashBlock);
         if (blit != mapBlockIndex.end() && chainActive.Contains(blit->second)) {
             // ... which are already in a block.
             int nHeight = blit->second->nHeight;
             for (const CTxOut &txout : wtx.tx->vout) {
                 // Iterate over all their outputs...
                 CAffectedKeysVisitor(*this, vAffected)
                     .Process(txout.scriptPubKey);
                 for (const CKeyID &keyid : vAffected) {
                     // ... and all their affected keys.
                     std::map<CKeyID, CBlockIndex *>::iterator rit =
                         mapKeyFirstBlock.find(keyid);
                     if (rit != mapKeyFirstBlock.end() &&
                         nHeight < rit->second->nHeight) {
                         rit->second = blit->second;
                     }
                 }
                 vAffected.clear();
             }
         }
     }
 
     // Extract block timestamps for those keys.
     for (std::map<CKeyID, CBlockIndex *>::const_iterator it =
              mapKeyFirstBlock.begin();
          it != mapKeyFirstBlock.end(); it++) {
         // Block times can be 2h off.
         mapKeyBirth[it->first] = it->second->GetBlockTime() - TIMESTAMP_WINDOW;
     }
 }
 
 /**
  * Compute smart timestamp for a transaction being added to the wallet.
  *
  * Logic:
  * - If sending a transaction, assign its timestamp to the current time.
  * - If receiving a transaction outside a block, assign its timestamp to the
  *   current time.
  * - If receiving a block with a future timestamp, assign all its (not already
  *   known) transactions' timestamps to the current time.
  * - If receiving a block with a past timestamp, before the most recent known
  *   transaction (that we care about), assign all its (not already known)
  *   transactions' timestamps to the same timestamp as that most-recent-known
  *   transaction.
  * - If receiving a block with a past timestamp, but after the most recent known
  *   transaction, assign all its (not already known) transactions' timestamps to
  *   the block time.
  *
  * For more information see CWalletTx::nTimeSmart,
  * https://bitcointalk.org/?topic=54527, or
  * https://github.com/bitcoin/bitcoin/pull/1393.
  */
 unsigned int CWallet::ComputeTimeSmart(const CWalletTx &wtx) const {
     unsigned int nTimeSmart = wtx.nTimeReceived;
     if (!wtx.hashUnset()) {
         if (mapBlockIndex.count(wtx.hashBlock)) {
             int64_t latestNow = wtx.nTimeReceived;
             int64_t latestEntry = 0;
 
             // Tolerate times up to the last timestamp in the wallet, but not
             // more than 5 minutes into the future.
             int64_t latestTolerated = latestNow + 300;
             const TxItems &txOrdered = wtxOrdered;
             for (auto it = txOrdered.rbegin(); it != txOrdered.rend(); ++it) {
                 CWalletTx *const pwtx = it->second.first;
                 if (pwtx == &wtx) {
                     continue;
                 }
                 CAccountingEntry *const pacentry = it->second.second;
                 int64_t nSmartTime;
                 if (pwtx) {
                     nSmartTime = pwtx->nTimeSmart;
                     if (!nSmartTime) {
                         nSmartTime = pwtx->nTimeReceived;
                     }
                 } else {
                     nSmartTime = pacentry->nTime;
                 }
                 if (nSmartTime <= latestTolerated) {
                     latestEntry = nSmartTime;
                     if (nSmartTime > latestNow) {
                         latestNow = nSmartTime;
                     }
                     break;
                 }
             }
 
             int64_t blocktime = mapBlockIndex[wtx.hashBlock]->GetBlockTime();
             nTimeSmart = std::max(latestEntry, std::min(blocktime, latestNow));
         } else {
             LogPrintf("%s: found %s in block %s not in index\n", __func__,
                       wtx.GetId().ToString(), wtx.hashBlock.ToString());
         }
     }
     return nTimeSmart;
 }
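 
 /*
  * Illustrative sketch, not part of the wallet sources: how the clamp
  * std::max(latestEntry, std::min(blocktime, latestNow)) in ComputeTimeSmart()
  * realizes the rules documented above, for hypothetical values:
  *
  *   int64_t latestNow   = 1500000000; // time this wallet saw the tx
  *   int64_t latestEntry = 1499999000; // newest tolerated prior wallet entry
  *   int64_t blocktime   = 1499990000; // block carries an even older timestamp
  *   // std::min(blocktime, latestNow)    == 1499990000
  *   // std::max(latestEntry, 1499990000) == 1499999000
  *
  * A block timestamp older than the most recent known transaction is pulled
  * forward to that transaction's time, while a block timestamp in the future
  * would instead be clamped down to latestNow.
  */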
 
 bool CWallet::AddDestData(const CTxDestination &dest, const std::string &key,
                           const std::string &value) {
     if (boost::get<CNoDestination>(&dest)) {
         return false;
     }
 
     mapAddressBook[dest].destdata.insert(std::make_pair(key, value));
     if (!fFileBacked) {
         return true;
     }
 
     return CWalletDB(strWalletFile).WriteDestData(dest, key, value);
 }
 
 bool CWallet::EraseDestData(const CTxDestination &dest,
                             const std::string &key) {
     if (!mapAddressBook[dest].destdata.erase(key)) {
         return false;
     }
 
     if (!fFileBacked) {
         return true;
     }
 
     return CWalletDB(strWalletFile).EraseDestData(dest, key);
 }
 
 bool CWallet::LoadDestData(const CTxDestination &dest, const std::string &key,
                            const std::string &value) {
     mapAddressBook[dest].destdata.insert(std::make_pair(key, value));
     return true;
 }
 
 bool CWallet::GetDestData(const CTxDestination &dest, const std::string &key,
                           std::string *value) const {
     std::map<CTxDestination, CAddressBookData>::const_iterator i =
         mapAddressBook.find(dest);
     if (i != mapAddressBook.end()) {
         CAddressBookData::StringMap::const_iterator j =
             i->second.destdata.find(key);
         if (j != i->second.destdata.end()) {
             if (value) {
                 *value = j->second;
             }
 
             return true;
         }
     }
 
     return false;
 }
 
 std::string CWallet::GetWalletHelpString(bool showDebug) {
     std::string strUsage = HelpMessageGroup(_("Wallet options:"));
     strUsage += HelpMessageOpt(
         "-disablewallet",
         _("Do not load the wallet and disable wallet RPC calls"));
     strUsage += HelpMessageOpt(
         "-keypool=<n>", strprintf(_("Set key pool size to <n> (default: %u)"),
                                   DEFAULT_KEYPOOL_SIZE));
     strUsage += HelpMessageOpt(
         "-fallbackfee=<amt>",
         strprintf(_("A fee rate (in %s/kB) that will be used when fee "
                     "estimation has insufficient data (default: %s)"),
                   CURRENCY_UNIT, FormatMoney(DEFAULT_FALLBACK_FEE)));
     strUsage += HelpMessageOpt(
         "-mintxfee=<amt>",
         strprintf(_("Fees (in %s/kB) smaller than this are considered zero fee "
                     "for transaction creation (default: %s)"),
                   CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MINFEE)));
     strUsage += HelpMessageOpt(
         "-paytxfee=<amt>",
         strprintf(
             _("Fee (in %s/kB) to add to transactions you send (default: %s)"),
             CURRENCY_UNIT, FormatMoney(payTxFee.GetFeePerK())));
     strUsage += HelpMessageOpt(
         "-rescan",
         _("Rescan the block chain for missing wallet transactions on startup"));
     strUsage += HelpMessageOpt(
         "-salvagewallet",
         _("Attempt to recover private keys from a corrupt wallet on startup"));
     if (showDebug) {
         strUsage += HelpMessageOpt(
             "-sendfreetransactions",
             strprintf(_("Send transactions as zero-fee transactions if "
                         "possible (default: %d)"),
                       DEFAULT_SEND_FREE_TRANSACTIONS));
     }
 
     strUsage +=
         HelpMessageOpt("-spendzeroconfchange",
                        strprintf(_("Spend unconfirmed change when sending "
                                    "transactions (default: %d)"),
                                  DEFAULT_SPEND_ZEROCONF_CHANGE));
     strUsage +=
         HelpMessageOpt("-txconfirmtarget=<n>",
                        strprintf(_("If paytxfee is not set, include enough fee "
                                    "so transactions begin confirmation on "
                                    "average within n blocks (default: %u)"),
                                  DEFAULT_TX_CONFIRM_TARGET));
     strUsage += HelpMessageOpt(
         "-usehd",
         _("Use hierarchical deterministic key generation (HD) after BIP32. "
           "Only has effect during wallet creation/first start") +
             " " + strprintf(_("(default: %d)"), DEFAULT_USE_HD_WALLET));
     strUsage += HelpMessageOpt("-upgradewallet",
                                _("Upgrade wallet to latest format on startup"));
     strUsage +=
         HelpMessageOpt("-wallet=<file>",
                        _("Specify wallet file (within data directory)") + " " +
                            strprintf(_("(default: %s)"), DEFAULT_WALLET_DAT));
     strUsage += HelpMessageOpt(
         "-walletbroadcast",
         _("Make the wallet broadcast transactions") + " " +
             strprintf(_("(default: %d)"), DEFAULT_WALLETBROADCAST));
     strUsage += HelpMessageOpt("-walletnotify=<cmd>",
                                _("Execute command when a wallet transaction "
                                  "changes (%s in cmd is replaced by TxID)"));
     strUsage += HelpMessageOpt(
         "-zapwallettxes=<mode>",
         _("Delete all wallet transactions and only recover those parts of the "
           "blockchain through -rescan on startup") +
             " " + _("(1 = keep tx meta data e.g. account owner and payment "
                     "request information, 2 = drop tx meta data)"));
 
     if (showDebug) {
         strUsage += HelpMessageGroup(_("Wallet debugging/testing options:"));
 
         strUsage += HelpMessageOpt(
             "-dblogsize=<n>",
             strprintf("Flush wallet database activity from memory to disk log "
                       "every <n> megabytes (default: %u)",
                       DEFAULT_WALLET_DBLOGSIZE));
         strUsage += HelpMessageOpt(
             "-flushwallet",
             strprintf("Run a thread to flush wallet periodically (default: %d)",
                       DEFAULT_FLUSHWALLET));
         strUsage += HelpMessageOpt(
             "-privdb", strprintf("Sets the DB_PRIVATE flag in the wallet db "
                                  "environment (default: %d)",
                                  DEFAULT_WALLET_PRIVDB));
         strUsage += HelpMessageOpt(
             "-walletrejectlongchains",
             strprintf(_("Wallet will not create transactions that violate "
                         "mempool chain limits (default: %d)"),
                       DEFAULT_WALLET_REJECT_LONG_CHAINS));
     }
 
     return strUsage;
 }
 
 CWallet *CWallet::CreateWalletFromFile(const std::string walletFile) {
     // Needed to restore wallet transaction meta data after -zapwallettxes
     std::vector<CWalletTx> vWtx;
 
     if (GetBoolArg("-zapwallettxes", false)) {
         uiInterface.InitMessage(_("Zapping all transactions from wallet..."));
 
         CWallet *tempWallet = new CWallet(walletFile);
         DBErrors nZapWalletRet = tempWallet->ZapWalletTx(vWtx);
         if (nZapWalletRet != DB_LOAD_OK) {
             InitError(
                 strprintf(_("Error loading %s: Wallet corrupted"), walletFile));
             return nullptr;
         }
 
         delete tempWallet;
         tempWallet = nullptr;
     }
 
     uiInterface.InitMessage(_("Loading wallet..."));
 
     int64_t nStart = GetTimeMillis();
     bool fFirstRun = true;
     CWallet *walletInstance = new CWallet(walletFile);
     DBErrors nLoadWalletRet = walletInstance->LoadWallet(fFirstRun);
     if (nLoadWalletRet != DB_LOAD_OK) {
         if (nLoadWalletRet == DB_CORRUPT) {
             InitError(
                 strprintf(_("Error loading %s: Wallet corrupted"), walletFile));
             return nullptr;
         }
 
         if (nLoadWalletRet == DB_NONCRITICAL_ERROR) {
             InitWarning(strprintf(
                 _("Error reading %s! All keys read correctly, but transaction "
                   "data"
                   " or address book entries might be missing or incorrect."),
                 walletFile));
         } else if (nLoadWalletRet == DB_TOO_NEW) {
             InitError(strprintf(
                 _("Error loading %s: Wallet requires newer version of %s"),
                 walletFile, _(PACKAGE_NAME)));
             return nullptr;
         } else if (nLoadWalletRet == DB_NEED_REWRITE) {
             InitError(strprintf(
                 _("Wallet needed to be rewritten: restart %s to complete"),
                 _(PACKAGE_NAME)));
             return nullptr;
         } else {
             InitError(strprintf(_("Error loading %s"), walletFile));
             return nullptr;
         }
     }
 
     if (GetBoolArg("-upgradewallet", fFirstRun)) {
         int nMaxVersion = GetArg("-upgradewallet", 0);
         // The -upgradewallet without argument case
         if (nMaxVersion == 0) {
             LogPrintf("Performing wallet upgrade to %i\n", FEATURE_LATEST);
             nMaxVersion = CLIENT_VERSION;
             // permanently upgrade the wallet immediately
             walletInstance->SetMinVersion(FEATURE_LATEST);
         } else {
             LogPrintf("Allowing wallet upgrade up to %i\n", nMaxVersion);
         }
 
         if (nMaxVersion < walletInstance->GetVersion()) {
             InitError(_("Cannot downgrade wallet"));
             return nullptr;
         }
 
         walletInstance->SetMaxVersion(nMaxVersion);
     }
 
     if (fFirstRun) {
         // Create new keyUser and set as default key.
         if (GetBoolArg("-usehd", DEFAULT_USE_HD_WALLET) &&
             !walletInstance->IsHDEnabled()) {
 
             // Ensure this wallet.dat can only be opened by clients supporting
             // HD with chain split.
             walletInstance->SetMinVersion(FEATURE_HD_SPLIT);
 
             // Generate a new master key.
             CPubKey masterPubKey = walletInstance->GenerateNewHDMasterKey();
             if (!walletInstance->SetHDMasterKey(masterPubKey)) {
                 throw std::runtime_error(std::string(__func__) +
                                          ": Storing master key failed");
             }
         }
 
         CPubKey newDefaultKey;
         if (walletInstance->GetKeyFromPool(newDefaultKey, false)) {
             walletInstance->SetDefaultKey(newDefaultKey);
             if (!walletInstance->SetAddressBook(
                     walletInstance->vchDefaultKey.GetID(), "", "receive")) {
                 InitError(_("Cannot write default address") += "\n");
                 return nullptr;
             }
         }
 
         walletInstance->SetBestChain(chainActive.GetLocator());
     } else if (IsArgSet("-usehd")) {
         bool useHD = GetBoolArg("-usehd", DEFAULT_USE_HD_WALLET);
         if (walletInstance->IsHDEnabled() && !useHD) {
             InitError(strprintf(_("Error loading %s: You can't disable HD on "
                                   "an already existing HD wallet"),
                                 walletFile));
             return nullptr;
         }
 
         if (!walletInstance->IsHDEnabled() && useHD) {
             InitError(strprintf(_("Error loading %s: You can't enable HD on "
                                   "an already existing non-HD wallet"),
                                 walletFile));
             return nullptr;
         }
     }
 
     LogPrintf(" wallet      %15dms\n", GetTimeMillis() - nStart);
 
     RegisterValidationInterface(walletInstance);
 
     CBlockIndex *pindexRescan = chainActive.Tip();
     if (GetBoolArg("-rescan", false)) {
         pindexRescan = chainActive.Genesis();
     } else {
         CWalletDB walletdb(walletFile);
         CBlockLocator locator;
         if (walletdb.ReadBestBlock(locator)) {
             pindexRescan = FindForkInGlobalIndex(chainActive, locator);
         } else {
             pindexRescan = chainActive.Genesis();
         }
     }
 
     if (chainActive.Tip() && chainActive.Tip() != pindexRescan) {
         // We can't rescan beyond non-pruned blocks; stop and throw an error.
         // This might happen if a user uses an old wallet within a pruned node,
         // or if they ran -disablewallet for a long time and then decided to
         // re-enable it.
         if (fPruneMode) {
             CBlockIndex *block = chainActive.Tip();
             while (block && block->pprev &&
                    (block->pprev->nStatus & BLOCK_HAVE_DATA) &&
                    block->pprev->nTx > 0 && pindexRescan != block) {
                 block = block->pprev;
             }
 
             if (pindexRescan != block) {
                 InitError(_("Prune: last wallet synchronisation goes beyond "
                             "pruned data. You need to -reindex (download the "
                             "whole blockchain again in case of pruned node)"));
                 return nullptr;
             }
         }
 
         uiInterface.InitMessage(_("Rescanning..."));
         LogPrintf("Rescanning last %i blocks (from block %i)...\n",
                   chainActive.Height() - pindexRescan->nHeight,
                   pindexRescan->nHeight);
         nStart = GetTimeMillis();
         walletInstance->ScanForWalletTransactions(pindexRescan, true);
         LogPrintf(" rescan      %15dms\n", GetTimeMillis() - nStart);
         walletInstance->SetBestChain(chainActive.GetLocator());
         CWalletDB::IncrementUpdateCounter();
 
         // Restore wallet transaction metadata after -zapwallettxes=1
         if (GetBoolArg("-zapwallettxes", false) &&
             GetArg("-zapwallettxes", "1") != "2") {
             CWalletDB walletdb(walletFile);
 
             for (const CWalletTx &wtxOld : vWtx) {
                 uint256 txid = wtxOld.GetId();
                 std::map<uint256, CWalletTx>::iterator mi =
                     walletInstance->mapWallet.find(txid);
                 if (mi != walletInstance->mapWallet.end()) {
                     const CWalletTx *copyFrom = &wtxOld;
                     CWalletTx *copyTo = &mi->second;
                     copyTo->mapValue = copyFrom->mapValue;
                     copyTo->vOrderForm = copyFrom->vOrderForm;
                     copyTo->nTimeReceived = copyFrom->nTimeReceived;
                     copyTo->nTimeSmart = copyFrom->nTimeSmart;
                     copyTo->fFromMe = copyFrom->fFromMe;
                     copyTo->strFromAccount = copyFrom->strFromAccount;
                     copyTo->nOrderPos = copyFrom->nOrderPos;
                     walletdb.WriteTx(*copyTo);
                 }
             }
         }
     }
 
     walletInstance->SetBroadcastTransactions(
         GetBoolArg("-walletbroadcast", DEFAULT_WALLETBROADCAST));
 
     LOCK(walletInstance->cs_wallet);
     LogPrintf("setKeyPool.size() = %u\n", walletInstance->GetKeyPoolSize());
     LogPrintf("mapWallet.size() = %u\n", walletInstance->mapWallet.size());
     LogPrintf("mapAddressBook.size() = %u\n",
               walletInstance->mapAddressBook.size());
 
     return walletInstance;
 }
 
 bool CWallet::InitLoadWallet() {
     if (GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) {
         pwalletMain = nullptr;
         LogPrintf("Wallet disabled!\n");
         return true;
     }
 
     std::string walletFile = GetArg("-wallet", DEFAULT_WALLET_DAT);
 
     if (walletFile.find_first_of("/\\") != std::string::npos) {
         return InitError(
             _("-wallet parameter must only specify a filename (not a path)"));
     } else if (SanitizeString(walletFile, SAFE_CHARS_FILENAME) != walletFile) {
         return InitError(_("Invalid characters in -wallet filename"));
     }
 
     CWallet *const pwallet = CreateWalletFromFile(walletFile);
     if (!pwallet) {
         return false;
     }
 
     pwalletMain = pwallet;
 
     return true;
 }
 
 std::atomic<bool> CWallet::fFlushScheduled(false);
 
 void CWallet::postInitProcess(CScheduler &scheduler) {
     // Add wallet transactions that aren't already in a block to mempool.
     // Do this here as mempool requires genesis block to be loaded.
     ReacceptWalletTransactions();
 
     // Run a thread to flush wallet periodically.
     if (!CWallet::fFlushScheduled.exchange(true)) {
         scheduler.scheduleEvery(MaybeCompactWalletDB, 500);
     }
 }
 
 bool CWallet::ParameterInteraction() {
     if (GetBoolArg("-disablewallet", DEFAULT_DISABLE_WALLET)) {
         return true;
     }
 
     if (GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY) &&
         SoftSetBoolArg("-walletbroadcast", false)) {
         LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting "
                   "-walletbroadcast=0\n",
                   __func__);
     }
 
     if (GetBoolArg("-salvagewallet", false) &&
         SoftSetBoolArg("-rescan", true)) {
         // Rewrite just private keys: rescan to find transactions
         LogPrintf("%s: parameter interaction: -salvagewallet=1 -> setting "
                   "-rescan=1\n",
                   __func__);
     }
 
     // -zapwallettxes implies a rescan
     if (GetBoolArg("-zapwallettxes", false) &&
         SoftSetBoolArg("-rescan", true)) {
         LogPrintf("%s: parameter interaction: -zapwallettxes=<mode> -> setting "
                   "-rescan=1\n",
                   __func__);
     }
 
     if (GetBoolArg("-sysperms", false)) {
         return InitError("-sysperms is not allowed in combination with enabled "
                          "wallet functionality");
     }
 
     if (GetArg("-prune", 0) && GetBoolArg("-rescan", false)) {
         return InitError(
             _("Rescans are not possible in pruned mode. You will need to use "
               "-reindex which will download the whole blockchain again."));
     }
 
     if (::minRelayTxFee.GetFeePerK() > HIGH_TX_FEE_PER_KB) {
         InitWarning(
             AmountHighWarn("-minrelaytxfee") + " " +
             _("The wallet will avoid paying less than the minimum relay fee."));
     }
 
     if (IsArgSet("-mintxfee")) {
         Amount n(0);
         auto parsed = ParseMoney(GetArg("-mintxfee", ""), n);
         if (!parsed || Amount(0) == n) {
             return InitError(AmountErrMsg("mintxfee", GetArg("-mintxfee", "")));
         }
 
         if (n > HIGH_TX_FEE_PER_KB) {
             InitWarning(AmountHighWarn("-mintxfee") + " " +
                         _("This is the minimum transaction fee you pay on "
                           "every transaction."));
         }
 
         CWallet::minTxFee = CFeeRate(n);
     }
 
     if (IsArgSet("-fallbackfee")) {
         Amount nFeePerK(0);
         if (!ParseMoney(GetArg("-fallbackfee", ""), nFeePerK)) {
             return InitError(
                 strprintf(_("Invalid amount for -fallbackfee=<amount>: '%s'"),
                           GetArg("-fallbackfee", "")));
         }
 
         if (nFeePerK > HIGH_TX_FEE_PER_KB) {
             InitWarning(AmountHighWarn("-fallbackfee") + " " +
                         _("This is the transaction fee you may pay when fee "
                           "estimates are not available."));
         }
 
         CWallet::fallbackFee = CFeeRate(nFeePerK);
     }
 
     if (IsArgSet("-paytxfee")) {
         Amount nFeePerK(0);
         if (!ParseMoney(GetArg("-paytxfee", ""), nFeePerK)) {
             return InitError(AmountErrMsg("paytxfee", GetArg("-paytxfee", "")));
         }
 
         if (nFeePerK > HIGH_TX_FEE_PER_KB) {
             InitWarning(AmountHighWarn("-paytxfee") + " " +
                         _("This is the transaction fee you will pay if you "
                           "send a transaction."));
         }
 
         payTxFee = CFeeRate(nFeePerK, 1000);
         if (payTxFee < ::minRelayTxFee) {
             return InitError(
                 strprintf(_("Invalid amount for -paytxfee=<amount>: '%s' (must "
                             "be at least %s)"),
                           GetArg("-paytxfee", ""), ::minRelayTxFee.ToString()));
         }
     }
 
     if (IsArgSet("-maxtxfee")) {
         Amount nMaxFee(0);
         if (!ParseMoney(GetArg("-maxtxfee", ""), nMaxFee)) {
             return InitError(AmountErrMsg("maxtxfee", GetArg("-maxtxfee", "")));
         }
 
         if (nMaxFee > HIGH_MAX_TX_FEE) {
             InitWarning(_("-maxtxfee is set very high! Fees this large could "
                           "be paid on a single transaction."));
         }
 
         maxTxFee = nMaxFee;
         if (CFeeRate(maxTxFee, 1000) < ::minRelayTxFee) {
             return InitError(
                 strprintf(_("Invalid amount for -maxtxfee=<amount>: '%s' (must "
                             "be at least the minrelay fee of %s to prevent "
                             "stuck transactions)"),
                           GetArg("-maxtxfee", ""), ::minRelayTxFee.ToString()));
         }
     }
 
     nTxConfirmTarget = GetArg("-txconfirmtarget", DEFAULT_TX_CONFIRM_TARGET);
     bSpendZeroConfChange =
         GetBoolArg("-spendzeroconfchange", DEFAULT_SPEND_ZEROCONF_CHANGE);
     fSendFreeTransactions =
         GetBoolArg("-sendfreetransactions", DEFAULT_SEND_FREE_TRANSACTIONS);
 
     if (fSendFreeTransactions &&
         GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) <= 0) {
         return InitError("Creation of free transactions with their relay "
                          "disabled is not supported.");
     }
 
     return true;
 }
 
 bool CWallet::BackupWallet(const std::string &strDest) {
     if (!fFileBacked) {
         return false;
     }
 
     while (true) {
         {
             LOCK(bitdb.cs_db);
             if (!bitdb.mapFileUseCount.count(strWalletFile) ||
                 bitdb.mapFileUseCount[strWalletFile] == 0) {
                 // Flush log data to the dat file.
                 bitdb.CloseDb(strWalletFile);
                 bitdb.CheckpointLSN(strWalletFile);
                 bitdb.mapFileUseCount.erase(strWalletFile);
 
                 // Copy wallet file.
                 fs::path pathSrc = GetDataDir() / strWalletFile;
                 fs::path pathDest(strDest);
                 if (fs::is_directory(pathDest)) {
                     pathDest /= strWalletFile;
                 }
 
                 try {
 
                     if (fs::equivalent(pathSrc, pathDest)) {
                         LogPrintf("cannot backup to wallet source file %s\n",
                                   pathDest.string());
                         return false;
                     }
 
 #if BOOST_VERSION >= 104000
                     fs::copy_file(pathSrc, pathDest,
                                   fs::copy_option::overwrite_if_exists);
 #else
                     fs::copy_file(pathSrc, pathDest);
 #endif
                     LogPrintf("copied %s to %s\n", strWalletFile,
                               pathDest.string());
                     return true;
                 } catch (const fs::filesystem_error &e) {
                     LogPrintf("error copying %s to %s - %s\n", strWalletFile,
                               pathDest.string(), e.what());
                     return false;
                 }
             }
         }
 
         MilliSleep(100);
     }
 
     return false;
 }
 
 CKeyPool::CKeyPool() {
     nTime = GetTime();
     fInternal = false;
 }
 
 CKeyPool::CKeyPool(const CPubKey &vchPubKeyIn, bool internalIn) {
     nTime = GetTime();
     vchPubKey = vchPubKeyIn;
     fInternal = internalIn;
 }
 
 CWalletKey::CWalletKey(int64_t nExpires) {
     nTimeCreated = (nExpires ? GetTime() : 0);
     nTimeExpires = nExpires;
 }
 
 void CMerkleTx::SetMerkleBranch(const CBlockIndex *pindex, int posInBlock) {
     // Update the tx's hashBlock
     hashBlock = pindex->GetBlockHash();
 
     // Set the position of the transaction in the block.
     nIndex = posInBlock;
 }
 
 int CMerkleTx::GetDepthInMainChain(const CBlockIndex *&pindexRet) const {
     if (hashUnset()) {
         return 0;
     }
 
     AssertLockHeld(cs_main);
 
     // Find the block it claims to be in.
     BlockMap::iterator mi = mapBlockIndex.find(hashBlock);
     if (mi == mapBlockIndex.end()) {
         return 0;
     }
 
     CBlockIndex *pindex = (*mi).second;
     if (!pindex || !chainActive.Contains(pindex)) {
         return 0;
     }
 
     pindexRet = pindex;
     return ((nIndex == -1) ? (-1) : 1) *
            (chainActive.Height() - pindex->nHeight + 1);
 }
 
 int CMerkleTx::GetBlocksToMaturity() const {
     if (!IsCoinBase()) {
         return 0;
     }
 
     return std::max(0, (COINBASE_MATURITY + 1) - GetDepthInMainChain());
 }
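 
 /*
  * Illustrative sketch, not part of the wallet sources: depth and maturity for
  * a hypothetical coinbase transaction, assuming COINBASE_MATURITY is 100 (its
  * value in consensus/consensus.h).
  *
  *   - Mined at height 500 with the active tip at height 500:
  *     GetDepthInMainChain() == 1 and GetBlocksToMaturity() ==
  *     std::max(0, 101 - 1) == 100, i.e. 100 more blocks are needed.
  *   - Once the tip reaches height 600, the depth is 101 and
  *     GetBlocksToMaturity() returns 0, so the output is spendable.
  */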
 
 bool CMerkleTx::AcceptToMemoryPool(const Amount nAbsurdFee,
                                    CValidationState &state) {
     return ::AcceptToMemoryPool(GetConfig(), mempool, state, tx, true, nullptr,
                                 nullptr, false, nAbsurdFee);
 }
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 1c51cb6d0..9f99fae0a 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -1,1179 +1,1187 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_WALLET_WALLET_H
 #define BITCOIN_WALLET_WALLET_H
 
 #include "amount.h"
 #include "script/ismine.h"
 #include "script/sign.h"
 #include "streams.h"
 #include "tinyformat.h"
 #include "ui_interface.h"
 #include "utilstrencodings.h"
 #include "validationinterface.h"
 #include "wallet/crypter.h"
 #include "wallet/rpcwallet.h"
 #include "wallet/walletdb.h"
 
 #include <boost/thread.hpp>
 
 #include <algorithm>
 #include <atomic>
 #include <cstdint>
 #include <map>
 #include <set>
 #include <stdexcept>
 #include <string>
 #include <utility>
 #include <vector>
 
 extern CWallet *pwalletMain;
 
 /**
  * Settings
  */
 extern CFeeRate payTxFee;
 extern unsigned int nTxConfirmTarget;
 extern bool bSpendZeroConfChange;
 extern bool fSendFreeTransactions;
 
 static const unsigned int DEFAULT_KEYPOOL_SIZE = 100;
 //! -paytxfee default
 static const Amount DEFAULT_TRANSACTION_FEE(0);
 //! -fallbackfee default
 static const Amount DEFAULT_FALLBACK_FEE(20000);
 //! -mintxfee default
 static const Amount DEFAULT_TRANSACTION_MINFEE(1000);
 //! minimum recommended increment for BIP 125 replacement txs
 static const Amount WALLET_INCREMENTAL_RELAY_FEE(5000);
 //! target minimum change amount
 static const Amount MIN_CHANGE = CENT;
 //! final minimum change amount after paying for fees
 static const Amount MIN_FINAL_CHANGE = MIN_CHANGE / 2;
 //! Default for -spendzeroconfchange
 static const bool DEFAULT_SPEND_ZEROCONF_CHANGE = true;
 //! Default for -sendfreetransactions
 static const bool DEFAULT_SEND_FREE_TRANSACTIONS = false;
 //! Default for -walletrejectlongchains
 static const bool DEFAULT_WALLET_REJECT_LONG_CHAINS = false;
 //! -txconfirmtarget default
 static const unsigned int DEFAULT_TX_CONFIRM_TARGET = 6;
 //! Largest (in bytes) free transaction we're willing to create
 static const unsigned int MAX_FREE_TRANSACTION_CREATE_SIZE = 1000;
 static const bool DEFAULT_WALLETBROADCAST = true;
 static const bool DEFAULT_DISABLE_WALLET = false;
 //! if set, all keys will be derived by using BIP32
 static const bool DEFAULT_USE_HD_WALLET = true;
 
 extern const char *DEFAULT_WALLET_DAT;
 
 class CBlockIndex;
 class CCoinControl;
 class COutput;
 class CReserveKey;
 class CScript;
 class CScheduler;
 class CTxMemPool;
 class CWalletTx;
 
 /** (client) version numbers for particular wallet features */
 enum WalletFeature {
     // the earliest version new wallets support (only useful for getinfo's
     // clientversion output)
     FEATURE_BASE = 10500,
 
     // wallet encryption
     FEATURE_WALLETCRYPT = 40000,
     // compressed public keys
     FEATURE_COMPRPUBKEY = 60000,
 
     // Hierarchical key derivation after BIP32 (HD Wallet)
     FEATURE_HD = 130000,
 
     // Wallet with HD chain split (change outputs will use m/0'/1'/k)
     FEATURE_HD_SPLIT = 160300,
 
     // HD is optional, use FEATURE_COMPRPUBKEY as latest version
     FEATURE_LATEST = FEATURE_COMPRPUBKEY,
 };
 
 /** A key pool entry */
 class CKeyPool {
 public:
     int64_t nTime;
     CPubKey vchPubKey;
     bool fInternal; // for change outputs
 
     CKeyPool();
     CKeyPool(const CPubKey &vchPubKeyIn, bool internalIn);
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         int nVersion = s.GetVersion();
         if (!(s.GetType() & SER_GETHASH)) {
             READWRITE(nVersion);
         }
 
         READWRITE(nTime);
         READWRITE(vchPubKey);
         if (ser_action.ForRead()) {
             try {
                 READWRITE(fInternal);
             } catch (std::ios_base::failure &) {
                 /**
                  * Flag as external address if we can't read the internal
                  * boolean (this will be the case for any wallet before the
                  * HD chain split version).
                  */
                 fInternal = false;
             }
         } else {
             READWRITE(fInternal);
         }
     }
 };
 
 /** Address book data */
 class CAddressBookData {
 public:
     std::string name;
     std::string purpose;
 
     CAddressBookData() { purpose = "unknown"; }
 
     typedef std::map<std::string, std::string> StringMap;
     StringMap destdata;
 };
 
 struct CRecipient {
     CScript scriptPubKey;
     Amount nAmount;
     bool fSubtractFeeFromAmount;
 };
 
 typedef std::map<std::string, std::string> mapValue_t;
 
 static inline void ReadOrderPos(int64_t &nOrderPos, mapValue_t &mapValue) {
     if (!mapValue.count("n")) {
         // TODO: calculate elsewhere
         nOrderPos = -1;
         return;
     }
 
     nOrderPos = atoi64(mapValue["n"].c_str());
 }
 
 static inline void WriteOrderPos(const int64_t &nOrderPos,
                                  mapValue_t &mapValue) {
     if (nOrderPos == -1) return;
     mapValue["n"] = i64tostr(nOrderPos);
 }
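 
 /*
  * Illustrative sketch, not part of the wallet sources: the round trip of an
  * ordering position through mapValue for a hypothetical entry.
  *
  *   mapValue_t mapValue;
  *   WriteOrderPos(7, mapValue);        // stores mapValue["n"] = "7"
  *   int64_t nOrderPos = 0;
  *   ReadOrderPos(nOrderPos, mapValue); // restores nOrderPos == 7
  *
  * WriteOrderPos(-1, mapValue) writes nothing, and ReadOrderPos() on a
  * mapValue without an "n" entry yields -1, so records without a stored
  * position keep the "unknown position" sentinel.
  */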
 
 struct COutputEntry {
     CTxDestination destination;
     Amount amount;
     int vout;
 };
 
 /** A transaction with a merkle branch linking it to the block chain. */
 class CMerkleTx {
 private:
     /** Constant used in hashBlock to indicate tx has been abandoned */
     static const uint256 ABANDON_HASH;
 
 public:
     CTransactionRef tx;
     uint256 hashBlock;
 
     /**
      * An nIndex == -1 means that hashBlock (if nonzero) refers to the earliest
      * block in the chain we know this or any in-wallet dependency conflicts
      * with. Older clients interpret nIndex == -1 as unconfirmed for backward
      * compatibility.
      */
     int nIndex;
 
     CMerkleTx() {
         SetTx(MakeTransactionRef());
         Init();
     }
 
     CMerkleTx(CTransactionRef arg) {
         SetTx(std::move(arg));
         Init();
     }
 
     /**
      * Helper conversion operator to allow passing CMerkleTx where CTransaction
      * is expected.
      * TODO: adapt callers and remove this operator.
      */
     operator const CTransaction &() const { return *tx; }
 
     void Init() {
         hashBlock = uint256();
         nIndex = -1;
     }
 
     void SetTx(CTransactionRef arg) { tx = std::move(arg); }
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         // For compatibility with older versions.
         std::vector<uint256> vMerkleBranch;
         READWRITE(tx);
         READWRITE(hashBlock);
         READWRITE(vMerkleBranch);
         READWRITE(nIndex);
     }
 
     void SetMerkleBranch(const CBlockIndex *pIndex, int posInBlock);
 
     /**
      * Return depth of transaction in blockchain:
      * <0  : conflicts with a transaction this deep in the blockchain
      *  0  : in memory pool, waiting to be included in a block
      * >=1 : this many blocks deep in the main chain
      */
     int GetDepthInMainChain(const CBlockIndex *&pindexRet) const;
     int GetDepthInMainChain() const {
         const CBlockIndex *pindexRet;
         return GetDepthInMainChain(pindexRet);
     }
     bool IsInMainChain() const {
         const CBlockIndex *pindexRet;
         return GetDepthInMainChain(pindexRet) > 0;
     }
     int GetBlocksToMaturity() const;
     /**
      * Pass this transaction to the mempool. Fails if absolute fee exceeds
      * absurd fee.
      */
     bool AcceptToMemoryPool(const Amount nAbsurdFee, CValidationState &state);
     bool hashUnset() const {
         return (hashBlock.IsNull() || hashBlock == ABANDON_HASH);
     }
     bool isAbandoned() const { return (hashBlock == ABANDON_HASH); }
     void setAbandoned() { hashBlock = ABANDON_HASH; }
 
     const TxId GetId() const { return tx->GetId(); }
     bool IsCoinBase() const { return tx->IsCoinBase(); }
 };
 
 /**
  * A transaction with a bunch of additional info that only the owner cares
  * about. It includes any unrecorded transactions needed to link it back to the
  * block chain.
  */
 class CWalletTx : public CMerkleTx {
 private:
     const CWallet *pwallet;
 
 public:
     mapValue_t mapValue;
     std::vector<std::pair<std::string, std::string>> vOrderForm;
     unsigned int fTimeReceivedIsTxTime;
     //!< time received by this node
     unsigned int nTimeReceived;
     /**
      * Stable timestamp that never changes, and reflects the order a transaction
      * was added to the wallet. Timestamp is based on the block time for a
      * transaction added as part of a block, or else the time when the
      * transaction was received if it wasn't part of a block, with the timestamp
      * adjusted in both cases so timestamp order matches the order transactions
      * were added to the wallet. More details can be found in
      * CWallet::ComputeTimeSmart().
      */
     unsigned int nTimeSmart;
     /**
      * From me flag is set to 1 for transactions that were created by the wallet
      * on this bitcoin node, and set to 0 for transactions that were created
      * externally and came in through the network or sendrawtransaction RPC.
      */
     char fFromMe;
     std::string strFromAccount;
     //!< position in ordered transaction list
     int64_t nOrderPos;
 
     // memory only
     mutable bool fDebitCached;
     mutable bool fCreditCached;
     mutable bool fImmatureCreditCached;
     mutable bool fAvailableCreditCached;
     mutable bool fWatchDebitCached;
     mutable bool fWatchCreditCached;
     mutable bool fImmatureWatchCreditCached;
     mutable bool fAvailableWatchCreditCached;
     mutable bool fChangeCached;
     mutable Amount nDebitCached;
     mutable Amount nCreditCached;
     mutable Amount nImmatureCreditCached;
     mutable Amount nAvailableCreditCached;
     mutable Amount nWatchDebitCached;
     mutable Amount nWatchCreditCached;
     mutable Amount nImmatureWatchCreditCached;
     mutable Amount nAvailableWatchCreditCached;
     mutable Amount nChangeCached;
 
     CWalletTx() { Init(nullptr); }
 
     CWalletTx(const CWallet *pwalletIn, CTransactionRef arg)
         : CMerkleTx(std::move(arg)) {
         Init(pwalletIn);
     }
 
     void Init(const CWallet *pwalletIn) {
         pwallet = pwalletIn;
         mapValue.clear();
         vOrderForm.clear();
         fTimeReceivedIsTxTime = false;
         nTimeReceived = 0;
         nTimeSmart = 0;
         fFromMe = false;
         strFromAccount.clear();
         fDebitCached = false;
         fCreditCached = false;
         fImmatureCreditCached = false;
         fAvailableCreditCached = false;
         fWatchDebitCached = false;
         fWatchCreditCached = false;
         fImmatureWatchCreditCached = false;
         fAvailableWatchCreditCached = false;
         fChangeCached = false;
         nDebitCached = Amount(0);
         nCreditCached = Amount(0);
         nImmatureCreditCached = Amount(0);
         nAvailableCreditCached = Amount(0);
         nWatchDebitCached = Amount(0);
         nWatchCreditCached = Amount(0);
         nAvailableWatchCreditCached = Amount(0);
         nImmatureWatchCreditCached = Amount(0);
         nChangeCached = Amount(0);
         nOrderPos = -1;
     }
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         if (ser_action.ForRead()) Init(nullptr);
         char fSpent = false;
 
         if (!ser_action.ForRead()) {
             mapValue["fromaccount"] = strFromAccount;
 
             WriteOrderPos(nOrderPos, mapValue);
 
             if (nTimeSmart) mapValue["timesmart"] = strprintf("%u", nTimeSmart);
         }
 
         READWRITE(*(CMerkleTx *)this);
         //!< Used to be vtxPrev
         std::vector<CMerkleTx> vUnused;
         READWRITE(vUnused);
         READWRITE(mapValue);
         READWRITE(vOrderForm);
         READWRITE(fTimeReceivedIsTxTime);
         READWRITE(nTimeReceived);
         READWRITE(fFromMe);
         READWRITE(fSpent);
 
         if (ser_action.ForRead()) {
             strFromAccount = mapValue["fromaccount"];
 
             ReadOrderPos(nOrderPos, mapValue);
 
             nTimeSmart = mapValue.count("timesmart")
                              ? (unsigned int)atoi64(mapValue["timesmart"])
                              : 0;
         }
 
         mapValue.erase("fromaccount");
         mapValue.erase("version");
         mapValue.erase("spent");
         mapValue.erase("n");
         mapValue.erase("timesmart");
     }
 
     //! make sure balances are recalculated
     void MarkDirty() {
         fCreditCached = false;
         fAvailableCreditCached = false;
         fImmatureCreditCached = false;
         fWatchDebitCached = false;
         fWatchCreditCached = false;
         fAvailableWatchCreditCached = false;
         fImmatureWatchCreditCached = false;
         fDebitCached = false;
         fChangeCached = false;
     }
 
     void BindWallet(CWallet *pwalletIn) {
         pwallet = pwalletIn;
         MarkDirty();
     }
 
     //! filter decides which addresses will count towards the debit
     Amount GetDebit(const isminefilter &filter) const;
     Amount GetCredit(const isminefilter &filter) const;
     Amount GetImmatureCredit(bool fUseCache = true) const;
     Amount GetAvailableCredit(bool fUseCache = true) const;
     Amount GetImmatureWatchOnlyCredit(const bool &fUseCache = true) const;
     Amount GetAvailableWatchOnlyCredit(const bool &fUseCache = true) const;
     Amount GetChange() const;
 
     void GetAmounts(std::list<COutputEntry> &listReceived,
                     std::list<COutputEntry> &listSent, Amount &nFee,
                     std::string &strSentAccount,
                     const isminefilter &filter) const;
 
     void GetAccountAmounts(const std::string &strAccount, Amount &nReceived,
                            Amount &nSent, Amount &nFee,
                            const isminefilter &filter) const;
 
     bool IsFromMe(const isminefilter &filter) const {
         return (GetDebit(filter) > Amount(0));
     }
 
     // True if only scriptSigs are different
     bool IsEquivalentTo(const CWalletTx &tx) const;
 
     bool InMempool() const;
     bool IsTrusted() const;
 
     int64_t GetTxTime() const;
     int GetRequestCount() const;
 
     bool RelayWalletTransaction(CConnman *connman);
 
     std::set<uint256> GetConflicts() const;
 };
 
 class COutput {
 public:
     const CWalletTx *tx;
     int i;
     int nDepth;
     bool fSpendable;
     bool fSolvable;
 
     COutput(const CWalletTx *txIn, int iIn, int nDepthIn, bool fSpendableIn,
             bool fSolvableIn) {
         tx = txIn;
         i = iIn;
         nDepth = nDepthIn;
         fSpendable = fSpendableIn;
         fSolvable = fSolvableIn;
     }
 
     std::string ToString() const;
 };
 
 /** Private key that includes an expiration date in case it never gets used. */
 class CWalletKey {
 public:
     CPrivKey vchPrivKey;
     int64_t nTimeCreated;
     int64_t nTimeExpires;
     std::string strComment;
     //! TODO: add something to note what created it (user, getnewaddress,
     //! change); maybe should have a map<string, string> property map.
 
     CWalletKey(int64_t nExpires = 0);
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         int nVersion = s.GetVersion();
         if (!(s.GetType() & SER_GETHASH)) READWRITE(nVersion);
         READWRITE(vchPrivKey);
         READWRITE(nTimeCreated);
         READWRITE(nTimeExpires);
         READWRITE(LIMITED_STRING(strComment, 65536));
     }
 };
 
 /**
  * Internal transfers.
  * Database key is acentry<account><counter>.
  */
 class CAccountingEntry {
 public:
     std::string strAccount;
     Amount nCreditDebit;
     int64_t nTime;
     std::string strOtherAccount;
     std::string strComment;
     mapValue_t mapValue;
     //!< position in ordered transaction list
     int64_t nOrderPos;
     uint64_t nEntryNo;
 
     CAccountingEntry() { SetNull(); }
 
     void SetNull() {
         nCreditDebit = Amount(0);
         nTime = 0;
         strAccount.clear();
         strOtherAccount.clear();
         strComment.clear();
         nOrderPos = -1;
         nEntryNo = 0;
     }
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         int nVersion = s.GetVersion();
         if (!(s.GetType() & SER_GETHASH)) READWRITE(nVersion);
         //! Note: strAccount is serialized as part of the key, not here.
         READWRITE(nCreditDebit);
         READWRITE(nTime);
         READWRITE(LIMITED_STRING(strOtherAccount, 65536));
 
         if (!ser_action.ForRead()) {
             WriteOrderPos(nOrderPos, mapValue);
 
             if (!(mapValue.empty() && _ssExtra.empty())) {
                 CDataStream ss(s.GetType(), s.GetVersion());
                 ss.insert(ss.begin(), '\0');
                 ss << mapValue;
                 ss.insert(ss.end(), _ssExtra.begin(), _ssExtra.end());
                 strComment.append(ss.str());
             }
         }
 
         READWRITE(LIMITED_STRING(strComment, 65536));
 
         size_t nSepPos = strComment.find("\0", 0, 1);
         if (ser_action.ForRead()) {
             mapValue.clear();
             if (std::string::npos != nSepPos) {
                 CDataStream ss(
                     std::vector<char>(strComment.begin() + nSepPos + 1,
                                       strComment.end()),
                     s.GetType(), s.GetVersion());
                 ss >> mapValue;
                 _ssExtra = std::vector<char>(ss.begin(), ss.end());
             }
             ReadOrderPos(nOrderPos, mapValue);
         }
         if (std::string::npos != nSepPos) strComment.erase(nSepPos);
 
         mapValue.erase("n");
     }
 
 private:
     std::vector<char> _ssExtra;
 };
 
 /**
  * A CWallet is an extension of a keystore, which also maintains a set of
  * transactions and balances, and provides the ability to create new
  * transactions.
  */
 class CWallet : public CCryptoKeyStore, public CValidationInterface {
 private:
     static std::atomic<bool> fFlushScheduled;
 
     /**
      * Select a set of coins such that nValueRet >= nTargetValue and at least
      * all coins from coinControl are selected; never select unconfirmed coins
      * unless they are ours.
      */
     bool SelectCoins(
         const std::vector<COutput> &vAvailableCoins, const Amount nTargetValue,
         std::set<std::pair<const CWalletTx *, unsigned int>> &setCoinsRet,
         Amount &nValueRet, const CCoinControl *coinControl = nullptr) const;
 
     CWalletDB *pwalletdbEncryption;
 
     //! the current wallet version: clients below this version are not able to
     //! load the wallet
     int nWalletVersion;
 
     //! the maximum wallet format version: memory-only variable that specifies
     //! to what version this wallet may be upgraded
     int nWalletMaxVersion;
 
     int64_t nNextResend;
     int64_t nLastResend;
     bool fBroadcastTransactions;
 
     /**
      * Used to keep track of spent outpoints, and detect and report conflicts
      * (double-spends or mutated transactions where the mutant gets mined).
      */
     typedef std::multimap<COutPoint, uint256> TxSpends;
     TxSpends mapTxSpends;
     void AddToSpends(const COutPoint &outpoint, const uint256 &wtxid);
     void AddToSpends(const uint256 &wtxid);
 
     /* Mark a transaction (and its in-wallet descendants) as conflicting with a
      * particular block. */
     void MarkConflicted(const uint256 &hashBlock, const uint256 &hashTx);
 
     void SyncMetaData(std::pair<TxSpends::iterator, TxSpends::iterator>);
 
+    /* Used by TransactionAddedToMempool/BlockConnected/BlockDisconnected */
+    void SyncTransaction(const CTransactionRef &tx,
+                         const CBlockIndex *pindexBlockConnected,
+                         int posInBlock);
+
     /* the HD chain data model (external chain counters) */
     CHDChain hdChain;
 
     /* HD derive new child key (on internal or external chain) */
     void DeriveNewChildKey(CKeyMetadata &metadata, CKey &secret,
                            bool internal = false);
 
     bool fFileBacked;
 
     std::set<int64_t> setKeyPool;
 
     int64_t nTimeFirstKey;
 
     /**
      * Private version of AddWatchOnly method which does not accept a timestamp,
      * and which will reset the wallet's nTimeFirstKey value to 1 if the watch
      * key did not previously have a timestamp associated with it. Because this
      * is an inherited virtual method, it is accessible despite being marked
      * private, but it is marked private anyway to encourage use of the other
      * AddWatchOnly which accepts a timestamp and sets nTimeFirstKey more
      * intelligently for more efficient rescans.
      */
     bool AddWatchOnly(const CScript &dest) override;
 
+    // Used to NotifyTransactionChanged of the previous block's coinbase when
+    // the next block comes in
+    uint256 hashPrevBestCoinbase;
+
 public:
     /*
      * Main wallet lock.
      * This lock protects all the fields added by CWallet
      *   except for:
      *      fFileBacked (immutable after instantiation)
      *      strWalletFile (immutable after instantiation)
      */
     mutable CCriticalSection cs_wallet;
 
     const std::string strWalletFile;
 
     void LoadKeyPool(int nIndex, const CKeyPool &keypool) {
         setKeyPool.insert(nIndex);
 
         // If no metadata exists yet, create a default with the pool key's
         // creation time. Note that this may be overwritten by actually stored
         // metadata for that key later, which is fine.
         CKeyID keyid = keypool.vchPubKey.GetID();
         if (mapKeyMetadata.count(keyid) == 0)
             mapKeyMetadata[keyid] = CKeyMetadata(keypool.nTime);
     }
 
     // Map from Key ID (for regular keys) or Script ID (for watch-only keys) to
     // key metadata.
     std::map<CTxDestination, CKeyMetadata> mapKeyMetadata;
 
     typedef std::map<unsigned int, CMasterKey> MasterKeyMap;
     MasterKeyMap mapMasterKeys;
     unsigned int nMasterKeyMaxID;
 
     CWallet() { SetNull(); }
 
     CWallet(const std::string &strWalletFileIn)
         : strWalletFile(strWalletFileIn) {
         SetNull();
         fFileBacked = true;
     }
 
     ~CWallet() {
         delete pwalletdbEncryption;
         pwalletdbEncryption = nullptr;
     }
 
     void SetNull() {
         nWalletVersion = FEATURE_BASE;
         nWalletMaxVersion = FEATURE_BASE;
         fFileBacked = false;
         nMasterKeyMaxID = 0;
         pwalletdbEncryption = nullptr;
         nOrderPosNext = 0;
         nNextResend = 0;
         nLastResend = 0;
         nTimeFirstKey = 0;
         fBroadcastTransactions = false;
     }
 
     std::map<uint256, CWalletTx> mapWallet;
     std::list<CAccountingEntry> laccentries;
 
     typedef std::pair<CWalletTx *, CAccountingEntry *> TxPair;
     typedef std::multimap<int64_t, TxPair> TxItems;
     TxItems wtxOrdered;
 
     int64_t nOrderPosNext;
     std::map<uint256, int> mapRequestCount;
 
     std::map<CTxDestination, CAddressBookData> mapAddressBook;
 
     CPubKey vchDefaultKey;
 
     std::set<COutPoint> setLockedCoins;
 
     const CWalletTx *GetWalletTx(const uint256 &hash) const;
 
     //! check whether we are allowed to upgrade to (or already support) the
     //! named feature
     bool CanSupportFeature(enum WalletFeature wf) {
         AssertLockHeld(cs_wallet);
         return nWalletMaxVersion >= wf;
     }
 
     /**
      * populate vCoins with vector of available COutputs.
      */
     void AvailableCoins(std::vector<COutput> &vCoins,
                         bool fOnlyConfirmed = true,
                         const CCoinControl *coinControl = nullptr,
                         bool fIncludeZeroValue = false) const;
 
     /**
      * Shuffle and select coins until nTargetValue is reached while avoiding
      * small change; this method is stochastic for some inputs, and upon
      * completion the selected coin set and the corresponding actual target
      * value are assembled.
      */
     bool SelectCoinsMinConf(
         const Amount nTargetValue, int nConfMine, int nConfTheirs,
         uint64_t nMaxAncestors, std::vector<COutput> vCoins,
         std::set<std::pair<const CWalletTx *, unsigned int>> &setCoinsRet,
         Amount &nValueRet) const;
 
     bool IsSpent(const uint256 &hash, unsigned int n) const;
 
     bool IsLockedCoin(uint256 hash, unsigned int n) const;
     void LockCoin(const COutPoint &output);
     void UnlockCoin(const COutPoint &output);
     void UnlockAllCoins();
     void ListLockedCoins(std::vector<COutPoint> &vOutpts);
 
     /**
      * keystore implementation
      * Generate a new key
      */
     CPubKey GenerateNewKey(bool internal = false);
     //! Adds a key to the store, and saves it to disk.
     bool AddKeyPubKey(const CKey &key, const CPubKey &pubkey) override;
     //! Adds a key to the store, without saving it to disk (used by LoadWallet)
     bool LoadKey(const CKey &key, const CPubKey &pubkey) {
         return CCryptoKeyStore::AddKeyPubKey(key, pubkey);
     }
 
     //! Load metadata (used by LoadWallet)
     bool LoadKeyMetadata(const CTxDestination &pubKey,
                          const CKeyMetadata &metadata);
 
     bool LoadMinVersion(int nVersion) {
         AssertLockHeld(cs_wallet);
         nWalletVersion = nVersion;
         nWalletMaxVersion = std::max(nWalletMaxVersion, nVersion);
         return true;
     }
     void UpdateTimeFirstKey(int64_t nCreateTime);
 
     //! Adds an encrypted key to the store, and saves it to disk.
     bool AddCryptedKey(const CPubKey &vchPubKey,
                        const std::vector<uint8_t> &vchCryptedSecret) override;
     //! Adds an encrypted key to the store, without saving it to disk (used by
     //! LoadWallet)
     bool LoadCryptedKey(const CPubKey &vchPubKey,
                         const std::vector<uint8_t> &vchCryptedSecret);
     bool AddCScript(const CScript &redeemScript) override;
     bool LoadCScript(const CScript &redeemScript);
 
     //! Adds a destination data tuple to the store, and saves it to disk
     bool AddDestData(const CTxDestination &dest, const std::string &key,
                      const std::string &value);
     //! Erases a destination data tuple in the store and on disk
     bool EraseDestData(const CTxDestination &dest, const std::string &key);
     //! Adds a destination data tuple to the store, without saving it to disk
     bool LoadDestData(const CTxDestination &dest, const std::string &key,
                       const std::string &value);
     //! Look up a destination data tuple in the store, return true if found
     //! false otherwise
     bool GetDestData(const CTxDestination &dest, const std::string &key,
                      std::string *value) const;
 
     //! Adds a watch-only address to the store, and saves it to disk.
     bool AddWatchOnly(const CScript &dest, int64_t nCreateTime);
     bool RemoveWatchOnly(const CScript &dest) override;
     //! Adds a watch-only address to the store, without saving it to disk (used
     //! by LoadWallet)
     bool LoadWatchOnly(const CScript &dest);
 
     //! Holds a timestamp at which point the wallet is scheduled (externally) to
     //! be relocked. Caller must arrange for actual relocking to occur via
     //! Lock().
     int64_t nRelockTime;
 
     bool Unlock(const SecureString &strWalletPassphrase);
     bool ChangeWalletPassphrase(const SecureString &strOldWalletPassphrase,
                                 const SecureString &strNewWalletPassphrase);
     bool EncryptWallet(const SecureString &strWalletPassphrase);
 
     void GetKeyBirthTimes(std::map<CTxDestination, int64_t> &mapKeyBirth) const;
     unsigned int ComputeTimeSmart(const CWalletTx &wtx) const;
 
     /**
      * Increment the next transaction order id
      * @return next transaction order id
      */
     int64_t IncOrderPosNext(CWalletDB *pwalletdb = nullptr);
     DBErrors ReorderTransactions();
     bool AccountMove(std::string strFrom, std::string strTo,
                      const Amount nAmount, std::string strComment = "");
     bool GetAccountPubkey(CPubKey &pubKey, std::string strAccount,
                           bool bForceNew = false);
 
     void MarkDirty();
     bool AddToWallet(const CWalletTx &wtxIn, bool fFlushOnClose = true);
     bool LoadToWallet(const CWalletTx &wtxIn);
-    void SyncTransaction(const CTransaction &tx, const CBlockIndex *pindex,
-                         int posInBlock) override;
-    bool AddToWalletIfInvolvingMe(const CTransaction &tx,
+    void TransactionAddedToMempool(const CTransactionRef &tx) override;
+    void
+    BlockConnected(const std::shared_ptr<const CBlock> &pblock,
+                   const CBlockIndex *pindex,
+                   const std::vector<CTransactionRef> &vtxConflicted) override;
+    void
+    BlockDisconnected(const std::shared_ptr<const CBlock> &pblock) override;
+    bool AddToWalletIfInvolvingMe(const CTransactionRef &tx,
                                   const CBlockIndex *pIndex, int posInBlock,
                                   bool fUpdate);
     CBlockIndex *ScanForWalletTransactions(CBlockIndex *pindexStart,
                                            bool fUpdate = false);
     void ReacceptWalletTransactions();
     void ResendWalletTransactions(int64_t nBestBlockTime,
                                   CConnman *connman) override;
     std::vector<uint256> ResendWalletTransactionsBefore(int64_t nTime,
                                                         CConnman *connman);
     Amount GetBalance() const;
     Amount GetUnconfirmedBalance() const;
     Amount GetImmatureBalance() const;
     Amount GetWatchOnlyBalance() const;
     Amount GetUnconfirmedWatchOnlyBalance() const;
     Amount GetImmatureWatchOnlyBalance() const;
 
     /**
      * Insert additional inputs into the transaction by calling
      * CreateTransaction();
      */
     bool FundTransaction(CMutableTransaction &tx, Amount &nFeeRet,
                          bool overrideEstimatedFeeRate,
                          const CFeeRate &specificFeeRate, int &nChangePosInOut,
                          std::string &strFailReason, bool includeWatching,
                          bool lockUnspents,
                          const std::set<int> &setSubtractFeeFromOutputs,
                          bool keepReserveKey = true,
                          const CTxDestination &destChange = CNoDestination());
 
     /**
      * Create a new transaction paying the recipients with a set of coins
      * selected by SelectCoins(); Also create the change output, when needed
      * @note passing nChangePosInOut as -1 will result in setting a random
      * position
      */
     bool CreateTransaction(const std::vector<CRecipient> &vecSend,
                            CWalletTx &wtxNew, CReserveKey &reservekey,
                            Amount &nFeeRet, int &nChangePosInOut,
                            std::string &strFailReason,
                            const CCoinControl *coinControl = nullptr,
                            bool sign = true);
     bool CommitTransaction(CWalletTx &wtxNew, CReserveKey &reservekey,
                            CConnman *connman, CValidationState &state);
 
     void ListAccountCreditDebit(const std::string &strAccount,
                                 std::list<CAccountingEntry> &entries);
     bool AddAccountingEntry(const CAccountingEntry &);
     bool AddAccountingEntry(const CAccountingEntry &, CWalletDB *pwalletdb);
     template <typename ContainerType>
     bool DummySignTx(CMutableTransaction &txNew, const ContainerType &coins);
 
     static CFeeRate minTxFee;
     static CFeeRate fallbackFee;
     /**
      * Estimate the minimum fee considering user set parameters and the required
      * fee
      */
     static Amount GetMinimumFee(unsigned int nTxBytes,
                                 unsigned int nConfirmTarget,
                                 const CTxMemPool &pool);
     /**
      * Estimate the minimum fee considering the required fee and targetFee, or
      * if targetFee is 0, the fee estimate for nConfirmTarget
      */
     static Amount GetMinimumFee(unsigned int nTxBytes,
                                 unsigned int nConfirmTarget,
                                 const CTxMemPool &pool, Amount targetFee);
     /**
      * Return the minimum required fee taking into account the floating relay
      * fee and user set minimum transaction fee
      */
     static Amount GetRequiredFee(unsigned int nTxBytes);
 
     bool NewKeyPool();
     size_t KeypoolCountExternalKeys();
     bool TopUpKeyPool(unsigned int kpSize = 0);
     void ReserveKeyFromKeyPool(int64_t &nIndex, CKeyPool &keypool,
                                bool internal);
     void KeepKey(int64_t nIndex);
     void ReturnKey(int64_t nIndex);
     bool GetKeyFromPool(CPubKey &key, bool internal = false);
     int64_t GetOldestKeyPoolTime();
     void GetAllReserveKeys(std::set<CKeyID> &setAddress) const;
 
     std::set<std::set<CTxDestination>> GetAddressGroupings();
     std::map<CTxDestination, Amount> GetAddressBalances();
 
     Amount GetAccountBalance(const std::string &strAccount, int nMinDepth,
                              const isminefilter &filter);
     Amount GetAccountBalance(CWalletDB &walletdb, const std::string &strAccount,
                              int nMinDepth, const isminefilter &filter);
     std::set<CTxDestination>
     GetAccountAddresses(const std::string &strAccount) const;
 
     isminetype IsMine(const CTxIn &txin) const;
     /**
      * Returns amount of debit if the input matches the filter, otherwise
      * returns 0
      */
     Amount GetDebit(const CTxIn &txin, const isminefilter &filter) const;
     isminetype IsMine(const CTxOut &txout) const;
     Amount GetCredit(const CTxOut &txout, const isminefilter &filter) const;
     bool IsChange(const CTxOut &txout) const;
     Amount GetChange(const CTxOut &txout) const;
     bool IsMine(const CTransaction &tx) const;
     /** should probably be renamed to IsRelevantToMe */
     bool IsFromMe(const CTransaction &tx) const;
     Amount GetDebit(const CTransaction &tx, const isminefilter &filter) const;
     /** Returns whether all of the inputs match the filter */
     bool IsAllFromMe(const CTransaction &tx, const isminefilter &filter) const;
     Amount GetCredit(const CTransaction &tx, const isminefilter &filter) const;
     Amount GetChange(const CTransaction &tx) const;
     void SetBestChain(const CBlockLocator &loc) override;
 
     DBErrors LoadWallet(bool &fFirstRunRet);
     DBErrors ZapWalletTx(std::vector<CWalletTx> &vWtx);
     DBErrors ZapSelectTx(std::vector<uint256> &vHashIn,
                          std::vector<uint256> &vHashOut);
 
     bool SetAddressBook(const CTxDestination &address,
                         const std::string &strName, const std::string &purpose);
 
     bool DelAddressBook(const CTxDestination &address);
 
-    void UpdatedTransaction(const uint256 &hashTx) override;
-
     void Inventory(const uint256 &hash) override {
         LOCK(cs_wallet);
         std::map<uint256, int>::iterator mi = mapRequestCount.find(hash);
         if (mi != mapRequestCount.end()) {
             (*mi).second++;
         }
     }
 
     void GetScriptForMining(std::shared_ptr<CReserveScript> &script) override;
-    void ResetRequestCount(const uint256 &hash) override {
-        LOCK(cs_wallet);
-        mapRequestCount[hash] = 0;
-    };
 
     unsigned int GetKeyPoolSize() {
         // setKeyPool
         AssertLockHeld(cs_wallet);
         return setKeyPool.size();
     }
 
     bool SetDefaultKey(const CPubKey &vchPubKey);
 
     //! signify that a particular wallet feature is now used. this may change
     //! nWalletVersion and nWalletMaxVersion if those are lower
     bool SetMinVersion(enum WalletFeature, CWalletDB *pwalletdbIn = nullptr,
                        bool fExplicit = false);
 
     //! change which version we're allowed to upgrade to (note that this does
     //! not immediately imply upgrading to that format)
     bool SetMaxVersion(int nVersion);
 
     //! get the current wallet format (the oldest client version guaranteed to
     //! understand this wallet)
     int GetVersion() {
         LOCK(cs_wallet);
         return nWalletVersion;
     }
 
     //! Get wallet transactions that conflict with given transaction (spend same
     //! outputs)
     std::set<uint256> GetConflicts(const uint256 &txid) const;
 
     //! Check if a given transaction has any of its outputs spent by another
     //! transaction in the wallet
     bool HasWalletSpend(const uint256 &txid) const;
 
     //! Flush wallet (bitdb flush)
     void Flush(bool shutdown = false);
 
     //! Verify the wallet database and perform salvage if required
     static bool Verify();
 
     /**
      * Address book entry changed.
      * @note called with lock cs_wallet held.
      */
     boost::signals2::signal<void(CWallet *wallet, const CTxDestination &address,
                                  const std::string &label, bool isMine,
                                  const std::string &purpose, ChangeType status)>
         NotifyAddressBookChanged;
 
     /**
      * Wallet transaction added, removed or updated.
      * @note called with lock cs_wallet held.
      */
     boost::signals2::signal<void(CWallet *wallet, const uint256 &hashTx,
                                  ChangeType status)>
         NotifyTransactionChanged;
 
     /** Show progress e.g. for rescan */
     boost::signals2::signal<void(const std::string &title, int nProgress)>
         ShowProgress;
 
     /** Watch-only address added */
     boost::signals2::signal<void(bool fHaveWatchOnly)> NotifyWatchonlyChanged;
 
     /** Inquire whether this wallet broadcasts transactions. */
     bool GetBroadcastTransactions() const { return fBroadcastTransactions; }
     /** Set whether this wallet broadcasts transactions. */
     void SetBroadcastTransactions(bool broadcast) {
         fBroadcastTransactions = broadcast;
     }
 
     /**
      * Mark a transaction (and its in-wallet descendants) as abandoned so its
      * inputs may be respent.
      */
     bool AbandonTransaction(const uint256 &hashTx);
 
     /**
      * Mark a transaction as replaced by another transaction (e.g., BIP 125).
      */
     bool MarkReplaced(const uint256 &originalHash, const uint256 &newHash);
 
     /* Returns the wallet's help message */
     static std::string GetWalletHelpString(bool showDebug);
 
     /**
      * Initializes the wallet, returns a new CWallet instance or a null pointer
      * in case of an error.
      */
     static CWallet *CreateWalletFromFile(const std::string walletFile);
     static bool InitLoadWallet();
 
     /**
      * Wallet post-init setup
      * Gives the wallet a chance to register repetitive tasks and complete
      * post-init tasks
      */
     void postInitProcess(CScheduler &scheduler);
 
     /* Wallets parameter interaction */
     static bool ParameterInteraction();
 
     bool BackupWallet(const std::string &strDest);
 
     /* Set the HD chain model (chain child index counters) */
     bool SetHDChain(const CHDChain &chain, bool memonly);
     const CHDChain &GetHDChain() { return hdChain; }
 
     /* Returns true if HD is enabled */
     bool IsHDEnabled();
 
     /* Generates a new HD master key (will not be activated) */
     CPubKey GenerateNewHDMasterKey();
 
     /**
      * Set the current HD master key (will reset the chain child index counters)
      * If possibleOldChain is provided, the parameters from the old chain
      * (version) will be preserved.
      */
     bool SetHDMasterKey(const CPubKey &key,
                         CHDChain *possibleOldChain = nullptr);
 };
 
 /** A key allocated from the key pool. */
 class CReserveKey : public CReserveScript {
 protected:
     CWallet *pwallet;
     int64_t nIndex;
     CPubKey vchPubKey;
 
 public:
     CReserveKey(CWallet *pwalletIn) {
         nIndex = -1;
         pwallet = pwalletIn;
     }
 
     ~CReserveKey() { ReturnKey(); }
 
     void ReturnKey();
     bool GetReservedKey(CPubKey &pubkey, bool internal = false);
     void KeepKey();
     void KeepScript() override { KeepKey(); }
 };
 
 /**
  * Account information.
  * Stored in wallet with key "acc"+string account name.
  */
 class CAccount {
 public:
     CPubKey vchPubKey;
 
     CAccount() { SetNull(); }
 
     void SetNull() { vchPubKey = CPubKey(); }
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         int nVersion = s.GetVersion();
         if (!(s.GetType() & SER_GETHASH)) {
             READWRITE(nVersion);
         }
 
         READWRITE(vchPubKey);
     }
 };
 
 // Helper for producing a set of max-sized low-S signatures (e.g. 72 bytes).
 // ContainerType is meant to hold pair<CWalletTx *, int> and to be iterable so
 // that each entry corresponds, in order, to each input (vin) of the transaction.
 template <typename ContainerType>
 bool CWallet::DummySignTx(CMutableTransaction &txNew,
                           const ContainerType &coins) {
     // Fill in dummy signatures for fee calculation.
     int nIn = 0;
     for (const auto &coin : coins) {
         const CScript &scriptPubKey =
             coin.first->tx->vout[coin.second].scriptPubKey;
         SignatureData sigdata;
 
         if (!ProduceSignature(DummySignatureCreator(this), scriptPubKey,
                               sigdata)) {
             return false;
         } else {
             UpdateTransaction(txNew, nIn, sigdata);
         }
 
         nIn++;
     }
 
     return true;
 }
 
 #endif // BITCOIN_WALLET_WALLET_H
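The wallet.h hunk above is the consumer side of the new listener model: instead of a single SyncTransaction override, CWallet now implements TransactionAddedToMempool, BlockConnected and BlockDisconnected and funnels all three into its private SyncTransaction helper. As a rough sketch of what any CValidationInterface subscriber looks like under the new callbacks (the class name and log text below are illustrative and not part of this patch; registration is assumed to go through RegisterValidationInterface()/UnregisterValidationInterface() from validationinterface.h):

    #include "validationinterface.h"

    #include "primitives/block.h"
    #include "primitives/transaction.h"
    #include "util.h"

    #include <memory>
    #include <vector>

    // Illustrative-only subscriber: logs the three events this patch introduces.
    class CTxEventLogger : public CValidationInterface {
    protected:
        void TransactionAddedToMempool(const CTransactionRef &ptx) override {
            LogPrintf("mempool accepted a tx with %u inputs\n", ptx->vin.size());
        }
        void BlockConnected(
            const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex,
            const std::vector<CTransactionRef> &vtxConflicted) override {
            // The connected block's transactions and any mempool transactions it
            // conflicted out both arrive through this single callback.
            LogPrintf("connected a block with %u txs (%u conflicts)\n",
                      pblock->vtx.size(), vtxConflicted.size());
        }
        void BlockDisconnected(
            const std::shared_ptr<const CBlock> &pblock) override {
            LogPrintf("disconnected a block with %u txs\n", pblock->vtx.size());
        }
    };

    // Usage (assumed registration API, as used elsewhere in the tree):
    //   CTxEventLogger logger;
    //   RegisterValidationInterface(&logger);
    //   ...
    //   UnregisterValidationInterface(&logger);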
diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp
index 53c46eab1..97fa5ac20 100644
--- a/src/zmq/zmqnotificationinterface.cpp
+++ b/src/zmq/zmqnotificationinterface.cpp
@@ -1,150 +1,172 @@
 // Copyright (c) 2015-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include "zmqnotificationinterface.h"
 #include "zmqpublishnotifier.h"
 
 #include "streams.h"
 #include "util.h"
 #include "validation.h"
 #include "version.h"
 
 void zmqError(const char *str) {
     LogPrint(BCLog::ZMQ, "zmq: Error: %s, errno=%s\n", str,
              zmq_strerror(errno));
 }
 
 CZMQNotificationInterface::CZMQNotificationInterface() : pcontext(nullptr) {}
 
 CZMQNotificationInterface::~CZMQNotificationInterface() {
     Shutdown();
 
     for (std::list<CZMQAbstractNotifier *>::iterator i = notifiers.begin();
          i != notifiers.end(); ++i) {
         delete *i;
     }
 }
 
 CZMQNotificationInterface *CZMQNotificationInterface::Create() {
     CZMQNotificationInterface *notificationInterface = nullptr;
     std::map<std::string, CZMQNotifierFactory> factories;
     std::list<CZMQAbstractNotifier *> notifiers;
 
     factories["pubhashblock"] =
         CZMQAbstractNotifier::Create<CZMQPublishHashBlockNotifier>;
     factories["pubhashtx"] =
         CZMQAbstractNotifier::Create<CZMQPublishHashTransactionNotifier>;
     factories["pubrawblock"] =
         CZMQAbstractNotifier::Create<CZMQPublishRawBlockNotifier>;
     factories["pubrawtx"] =
         CZMQAbstractNotifier::Create<CZMQPublishRawTransactionNotifier>;
 
     for (std::map<std::string, CZMQNotifierFactory>::const_iterator i =
              factories.begin();
          i != factories.end(); ++i) {
         std::string arg("-zmq" + i->first);
         if (IsArgSet(arg)) {
             CZMQNotifierFactory factory = i->second;
             std::string address = GetArg(arg, "");
             CZMQAbstractNotifier *notifier = factory();
             notifier->SetType(i->first);
             notifier->SetAddress(address);
             notifiers.push_back(notifier);
         }
     }
 
     if (!notifiers.empty()) {
         notificationInterface = new CZMQNotificationInterface();
         notificationInterface->notifiers = notifiers;
 
         if (!notificationInterface->Initialize()) {
             delete notificationInterface;
             notificationInterface = nullptr;
         }
     }
 
     return notificationInterface;
 }
 
 // Called at startup to conditionally set up ZMQ socket(s)
 bool CZMQNotificationInterface::Initialize() {
     LogPrint(BCLog::ZMQ, "zmq: Initialize notification interface\n");
     assert(!pcontext);
 
     pcontext = zmq_init(1);
 
     if (!pcontext) {
         zmqError("Unable to initialize context");
         return false;
     }
 
     std::list<CZMQAbstractNotifier *>::iterator i = notifiers.begin();
     for (; i != notifiers.end(); ++i) {
         CZMQAbstractNotifier *notifier = *i;
         if (notifier->Initialize(pcontext)) {
             LogPrint(BCLog::ZMQ, "  Notifier %s ready (address = %s)\n",
                      notifier->GetType(), notifier->GetAddress());
         } else {
             LogPrint(BCLog::ZMQ, "  Notifier %s failed (address = %s)\n",
                      notifier->GetType(), notifier->GetAddress());
             break;
         }
     }
 
     if (i != notifiers.end()) {
         return false;
     }
 
     return true;
 }
 
 // Called during shutdown sequence
 void CZMQNotificationInterface::Shutdown() {
     LogPrint(BCLog::ZMQ, "zmq: Shutdown notification interface\n");
     if (pcontext) {
         for (std::list<CZMQAbstractNotifier *>::iterator i = notifiers.begin();
              i != notifiers.end(); ++i) {
             CZMQAbstractNotifier *notifier = *i;
             LogPrint(BCLog::ZMQ, "   Shutdown notifier %s at %s\n",
                      notifier->GetType(), notifier->GetAddress());
             notifier->Shutdown();
         }
         zmq_ctx_destroy(pcontext);
 
         pcontext = 0;
     }
 }
 
 void CZMQNotificationInterface::UpdatedBlockTip(const CBlockIndex *pindexNew,
                                                 const CBlockIndex *pindexFork,
                                                 bool fInitialDownload) {
     // Skip in IBD or when blocks were disconnected without any new ones.
     if (fInitialDownload || pindexNew == pindexFork) return;
 
     for (std::list<CZMQAbstractNotifier *>::iterator i = notifiers.begin();
          i != notifiers.end();) {
         CZMQAbstractNotifier *notifier = *i;
         if (notifier->NotifyBlock(pindexNew)) {
             i++;
         } else {
             notifier->Shutdown();
             i = notifiers.erase(i);
         }
     }
 }
 
-void CZMQNotificationInterface::SyncTransaction(const CTransaction &tx,
-                                                const CBlockIndex *pindex,
-                                                int posInBlock) {
+void CZMQNotificationInterface::TransactionAddedToMempool(
+    const CTransactionRef &ptx) {
+    // Also called from BlockConnected and BlockDisconnected, since all three
+    // events trigger the same NotifyTransaction() call on every notifier.
+    const CTransaction &tx = *ptx;
+
     for (std::list<CZMQAbstractNotifier *>::iterator i = notifiers.begin();
          i != notifiers.end();) {
         CZMQAbstractNotifier *notifier = *i;
         if (notifier->NotifyTransaction(tx)) {
             i++;
         } else {
             notifier->Shutdown();
             i = notifiers.erase(i);
         }
     }
 }
+
+void CZMQNotificationInterface::BlockConnected(
+    const std::shared_ptr<const CBlock> &pblock,
+    const CBlockIndex *pindexConnected,
+    const std::vector<CTransactionRef> &vtxConflicted) {
+    for (const CTransactionRef &ptx : pblock->vtx) {
+        // Do a normal notify for each transaction added in the block
+        TransactionAddedToMempool(ptx);
+    }
+}
+
+void CZMQNotificationInterface::BlockDisconnected(
+    const std::shared_ptr<const CBlock> &pblock) {
+    for (const CTransactionRef &ptx : pblock->vtx) {
+        // Do a normal notify for each transaction removed in block
+        // disconnection
+        TransactionAddedToMempool(ptx);
+    }
+}
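The three callbacks above all funnel into the same per-notifier NotifyTransaction()/NotifyBlock() publishers, so nothing changes on the wire: the existing pubhashtx, pubrawtx, pubhashblock and pubrawblock topics keep working. A minimal libzmq SUB client that consumes the hash-tx notifications these callbacks generate might look like the sketch below; the endpoint and topic are whatever the node was configured with (here assumed to be -zmqpubhashtx=tcp://127.0.0.1:28332), and error handling is omitted:

    #include <zmq.h>

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        void *ctx = zmq_ctx_new();
        void *sub = zmq_socket(ctx, ZMQ_SUB);
        zmq_connect(sub, "tcp://127.0.0.1:28332");
        zmq_setsockopt(sub, ZMQ_SUBSCRIBE, "hashtx", strlen("hashtx"));

        // Each notification is a multipart message: topic, payload (a 32-byte
        // hash for the hash* topics), then a 4-byte little-endian sequence.
        for (;;) {
            char topic[32];
            uint8_t hash[32];
            uint32_t seq = 0;
            zmq_recv(sub, topic, sizeof(topic), 0);
            zmq_recv(sub, hash, sizeof(hash), 0);
            zmq_recv(sub, &seq, sizeof(seq), 0);
            printf("tx notification, sequence %u\n", seq);
        }

        zmq_close(sub);
        zmq_ctx_destroy(ctx);
        return 0;
    }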
diff --git a/src/zmq/zmqnotificationinterface.h b/src/zmq/zmqnotificationinterface.h
index d5e74ff56..a00f1a539 100644
--- a/src/zmq/zmqnotificationinterface.h
+++ b/src/zmq/zmqnotificationinterface.h
@@ -1,40 +1,45 @@
 // Copyright (c) 2015-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_ZMQ_ZMQNOTIFICATIONINTERFACE_H
 #define BITCOIN_ZMQ_ZMQNOTIFICATIONINTERFACE_H
 
 #include "validationinterface.h"
 
+#include <list>
 #include <map>
-#include <string>
 
 class CBlockIndex;
 class CZMQAbstractNotifier;
 
 class CZMQNotificationInterface : public CValidationInterface {
 public:
     virtual ~CZMQNotificationInterface();
 
     static CZMQNotificationInterface *Create();
 
 protected:
     bool Initialize();
     void Shutdown();
 
     // CValidationInterface
-    void SyncTransaction(const CTransaction &tx, const CBlockIndex *pindex,
-                         int posInBlock) override;
+    void TransactionAddedToMempool(const CTransactionRef &tx) override;
+    void
+    BlockConnected(const std::shared_ptr<const CBlock> &pblock,
+                   const CBlockIndex *pindexConnected,
+                   const std::vector<CTransactionRef> &vtxConflicted) override;
+    void
+    BlockDisconnected(const std::shared_ptr<const CBlock> &pblock) override;
     void UpdatedBlockTip(const CBlockIndex *pindexNew,
                          const CBlockIndex *pindexFork,
                          bool fInitialDownload) override;
 
 private:
     CZMQNotificationInterface();
 
     void *pcontext;
     std::list<CZMQAbstractNotifier *> notifiers;
 };
 
 #endif // BITCOIN_ZMQ_ZMQNOTIFICATIONINTERFACE_H
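For orientation, the interface declared above is only ever instantiated through Create() and hooked into validation at node startup; that wiring lives in init.cpp and is untouched by this patch. A hedged sketch of the lifecycle pattern follows (the two function names are hypothetical; the registration calls are the ones declared in validationinterface.h):

    #include "validationinterface.h"
    #include "zmq/zmqnotificationinterface.h"

    static CZMQNotificationInterface *pzmqNotificationInterface = nullptr;

    // Startup: Create() returns nullptr when no -zmqpub* option is configured
    // or when socket initialization fails, so registration is conditional.
    void StartZMQNotifications() {
        pzmqNotificationInterface = CZMQNotificationInterface::Create();
        if (pzmqNotificationInterface) {
            RegisterValidationInterface(pzmqNotificationInterface);
        }
    }

    // Shutdown: unregister before destroying so no callbacks can arrive on a
    // dead object.
    void StopZMQNotifications() {
        if (pzmqNotificationInterface) {
            UnregisterValidationInterface(pzmqNotificationInterface);
            delete pzmqNotificationInterface;
            pzmqNotificationInterface = nullptr;
        }
    }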