diff --git a/src/avalanche/processor.h b/src/avalanche/processor.h
index 000319a42..e8c007156 100644
--- a/src/avalanche/processor.h
+++ b/src/avalanche/processor.h
@@ -1,438 +1,460 @@
 // Copyright (c) 2018-2019 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_AVALANCHE_PROCESSOR_H
 #define BITCOIN_AVALANCHE_PROCESSOR_H
 
 #include <avalanche/config.h>
 #include <avalanche/node.h>
 #include <avalanche/proof.h>
 #include <avalanche/proofcomparator.h>
 #include <avalanche/protocol.h>
 #include <avalanche/voterecord.h> // For AVALANCHE_MAX_INFLIGHT_POLL
 #include <blockindex.h>
 #include <blockindexcomparators.h>
 #include <bloom.h>
 #include <eventloop.h>
 #include <interfaces/chain.h>
 #include <interfaces/handler.h>
 #include <key.h>
 #include <net.h>
 #include <primitives/transaction.h>
 #include <rwcollection.h>
 #include <util/variant.h>
 
 #include <boost/multi_index/composite_key.hpp>
 #include <boost/multi_index/hashed_index.hpp>
 #include <boost/multi_index/member.hpp>
 #include <boost/multi_index/ordered_index.hpp>
 #include <boost/multi_index_container.hpp>
 
 #include <atomic>
 #include <chrono>
 #include <cstdint>
 #include <memory>
 #include <unordered_map>
 #include <variant>
 #include <vector>
 
 class ArgsManager;
 class CConnman;
 class CNode;
 class CScheduler;
 class Config;
 class PeerManager;
 struct bilingual_str;
 
 /**
  * Maximum number of items that can be polled at once.
  */
 static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL = 16;
 
 /**
  * How long before we consider that a query timed out.
  */
 static constexpr std::chrono::milliseconds AVALANCHE_DEFAULT_QUERY_TIMEOUT{
     10000};
 
 /**
  * The size of the finalized items filter. It should be large enough that an
  * influx of inventories cannot roll any particular item out of the filter on
  * demand. For example, transactions will roll blocks out of the filter.
  * Tracking many more items than can possibly be polled at once ensures that
  * recently polled items will come to a stable state on the network before
  * rolling out of the filter.
  */
 static constexpr uint32_t AVALANCHE_FINALIZED_ITEMS_FILTER_NUM_ELEMENTS =
     AVALANCHE_MAX_INFLIGHT_POLL * 20;
 
 namespace avalanche {
 
 class Delegation;
 class PeerManager;
 class ProofRegistrationState;
 struct VoteRecord;
 
 enum struct VoteStatus : uint8_t {
     Invalid,
     Rejected,
     Accepted,
     Finalized,
     Stale,
 };
 
 using AnyVoteItem =
     std::variant<const ProofRef, const CBlockIndex *, const CTransactionRef>;
 
 class VoteItemUpdate {
     AnyVoteItem item;
     VoteStatus status;
 
 public:
     VoteItemUpdate(AnyVoteItem itemIn, VoteStatus statusIn)
         : item(std::move(itemIn)), status(statusIn) {}
 
     const VoteStatus &getStatus() const { return status; }
     const AnyVoteItem &getVoteItem() const { return item; }
 };
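
// [Illustration only, not part of this patch] A sketch of how a caller such
// as net_processing can consume the updates filled in by registerVotes();
// the per-type handlers are hypothetical placeholders.
inline void handleUpdatesExample(const std::vector<VoteItemUpdate> &updates) {
    for (const VoteItemUpdate &u : updates) {
        if (u.getStatus() != VoteStatus::Finalized) {
            continue;
        }
        std::visit(variant::overloaded{
                       [](const ProofRef &proof) { /* finalize the proof */ },
                       [](const CBlockIndex *pindex) { /* finalize the block */ },
                       [](const CTransactionRef &tx) { /* finalize the tx */ },
                   },
                   u.getVoteItem());
    }
}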
 
 class VoteMapComparator {
     const CTxMemPool *mempool{nullptr};
 
 public:
     VoteMapComparator() {}
     VoteMapComparator(const CTxMemPool *mempoolIn) : mempool(mempoolIn) {}
 
     bool operator()(const AnyVoteItem &lhs, const AnyVoteItem &rhs) const {
         // If the variants are of different types, sort them by variant index
         if (lhs.index() != rhs.index()) {
             return lhs.index() < rhs.index();
         }
 
         return std::visit(
             variant::overloaded{
                 [](const ProofRef &lhs, const ProofRef &rhs) {
                     return ProofComparatorByScore()(lhs, rhs);
                 },
                 [](const CBlockIndex *lhs, const CBlockIndex *rhs) {
                     // Reverse ordering so we get the highest work first
                     return CBlockIndexWorkComparator()(rhs, lhs);
                 },
                 [this](const CTransactionRef &lhs, const CTransactionRef &rhs) {
                     const TxId &lhsTxId = lhs->GetId();
                     const TxId &rhsTxId = rhs->GetId();
 
                     // If there is no mempool, sort by TxId. Note that polling
                     // for txs is currently not supported if there is no mempool
                     // so this is only a safety net.
                     if (!mempool) {
                         return lhsTxId < rhsTxId;
                     }
 
                     LOCK(mempool->cs);
 
                     auto lhsOptIter = mempool->GetIter(lhsTxId);
                     auto rhsOptIter = mempool->GetIter(rhsTxId);
 
                     // If the transactions are not in the mempool, tie by TxId
                     if (!lhsOptIter && !rhsOptIter) {
                         return lhsTxId < rhsTxId;
                     }
 
                     // If only one is in the mempool, pick that one
                     if (lhsOptIter.has_value() != rhsOptIter.has_value()) {
                         return !!lhsOptIter;
                     }
 
                     // Both are in the mempool, select the highest fee rate
                     // including the fee deltas
                     return CompareTxMemPoolEntryByModifiedFeeRate{}(
                         **lhsOptIter, **rhsOptIter);
                 },
                 [](const auto &lhs, const auto &rhs) {
                     // This serves 2 purposes:
                     //  - This makes sure that we don't forget to implement a
                     //    comparison case when adding a new variant type.
                     //  - This avoids having to write all the cross type cases
                     //    which are already handled by the index sort above.
                     //    Because the compiler has no way to determine that, we
                     //    cannot use static assertions here without having to
                     //    define the whole type matrix also.
                     assert(false);
                     // Return any bool, it's only there to make the compiler
                     // happy.
                     return false;
                 },
             },
             lhs, rhs);
     }
 };
 using VoteMap = std::map<AnyVoteItem, VoteRecord, VoteMapComparator>;
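
// [Illustration only, not part of this patch] The two-level ordering used by
// VoteMapComparator, shown on simplified stand-in types: items of different
// variant alternatives are ordered by variant index first; only items of the
// same alternative reach the type-specific comparison inside std::visit.
namespace vote_map_ordering_example {
struct FakeProof { int score; };
struct FakeBlock { int work; };
using Item = std::variant<FakeProof, FakeBlock>;

inline bool Less(const Item &lhs, const Item &rhs) {
    if (lhs.index() != rhs.index()) {
        // Cross-type case: FakeProof (index 0) always sorts before FakeBlock.
        return lhs.index() < rhs.index();
    }
    return std::visit(
        variant::overloaded{
            [](const FakeProof &l, const FakeProof &r) { return l.score > r.score; },
            [](const FakeBlock &l, const FakeBlock &r) { return l.work > r.work; },
            [](const auto &, const auto &) { return false; /* unreachable */ },
        },
        lhs, rhs);
}
} // namespace vote_map_ordering_example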
 
 struct query_timeout {};
 
 namespace {
     struct AvalancheTest;
 }
 
 // FIXME Implement a proper notification handler for node disconnection instead
 // of implementing the whole NetEventsInterface for a single interesting event.
 class Processor final : public NetEventsInterface {
     Config avaconfig;
     CConnman *connman;
     ChainstateManager &chainman;
     CTxMemPool *mempool;
 
     /**
      * Items to run avalanche on.
      */
     RWCollection<VoteMap> voteRecords;
 
     /**
      * Keep track of peers and queries sent.
      */
     std::atomic<uint64_t> round;
 
     /**
      * Keep track of the peers and associated infos.
      */
     mutable Mutex cs_peerManager;
     std::unique_ptr<PeerManager> peerManager GUARDED_BY(cs_peerManager);
 
     struct Query {
         NodeId nodeid;
         uint64_t round;
         SteadyMilliseconds timeout;
 
         /**
          * We declare this as mutable so it can be modified in the multi_index.
          * This is ok because we do not use this field to index in any way.
          *
          * /!\ Do not use any mutable field as index.
          */
         mutable std::vector<CInv> invs;
     };
 
     using QuerySet = boost::multi_index_container<
         Query,
         boost::multi_index::indexed_by<
             // index by nodeid/round
             boost::multi_index::hashed_unique<boost::multi_index::composite_key<
                 Query,
                 boost::multi_index::member<Query, NodeId, &Query::nodeid>,
                 boost::multi_index::member<Query, uint64_t, &Query::round>>>,
             // sorted by timeout
             boost::multi_index::ordered_non_unique<
                 boost::multi_index::tag<query_timeout>,
                 boost::multi_index::member<Query, SteadyMilliseconds,
                                            &Query::timeout>>>>;
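
    // [Illustration only, not part of this patch] A sketch of how the two
    // indexes above can be used, in the spirit of clearTimedoutRequests() and
    // registerVotes(); the function name and parameters are hypothetical.
    static void pruneQueriesExample(QuerySet &querySet, SteadyMilliseconds now,
                                    NodeId nodeid, uint64_t round) {
        // Drop every query whose timeout has passed, via the ordered index
        // tagged query_timeout.
        auto &byTimeout = querySet.get<query_timeout>();
        byTimeout.erase(byTimeout.begin(), byTimeout.lower_bound(now));

        // Look up one in-flight query by its (nodeid, round) composite key on
        // the hashed index (index 0, the container's default interface).
        auto it = querySet.find(std::make_tuple(nodeid, round));
        if (it != querySet.end()) {
            querySet.erase(it);
        }
    }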
 
     RWCollection<QuerySet> queries;
 
     /** Data required to participate. */
     struct PeerData;
     std::unique_ptr<PeerData> peerData;
     CKey sessionKey;
 
     /** Event loop machinery. */
     EventLoop eventLoop;
 
     /**
      * Quorum management.
      */
     uint32_t minQuorumScore;
     double minQuorumConnectedScoreRatio;
     std::atomic<bool> quorumIsEstablished{false};
     std::atomic<bool> m_canShareLocalProof{false};
     int64_t minAvaproofsNodeCount;
     std::atomic<int64_t> avaproofsNodeCounter{0};
 
     /** Voting parameters. */
     const uint32_t staleVoteThreshold;
     const uint32_t staleVoteFactor;
 
     /** Registered interfaces::Chain::Notifications handler. */
     class NotificationsHandler;
     std::unique_ptr<interfaces::Handler> chainNotificationsHandler;
 
     mutable Mutex cs_finalizationTip;
     const CBlockIndex *finalizationTip GUARDED_BY(cs_finalizationTip){nullptr};
 
     mutable Mutex cs_delayedAvahelloNodeIds;
     /**
      * A list of the nodes that did not get our proof announced via avahello
      * yet because we had no inbound connection.
      */
     std::unordered_set<NodeId>
         delayedAvahelloNodeIds GUARDED_BY(cs_delayedAvahelloNodeIds);
 
     struct StakingReward {
         int blockheight;
         CScript winner;
         std::vector<CScript> acceptableWinners;
     };
 
     mutable Mutex cs_stakingRewards;
     std::unordered_map<BlockHash, StakingReward, SaltedUint256Hasher>
         stakingRewards GUARDED_BY(cs_stakingRewards);
 
     Processor(Config avaconfig, interfaces::Chain &chain, CConnman *connmanIn,
               ChainstateManager &chainman, CTxMemPool *mempoolIn,
               CScheduler &scheduler, std::unique_ptr<PeerData> peerDataIn,
               CKey sessionKeyIn, uint32_t minQuorumTotalScoreIn,
               double minQuorumConnectedScoreRatioIn,
               int64_t minAvaproofsNodeCountIn, uint32_t staleVoteThresholdIn,
               uint32_t staleVoteFactorIn, Amount stakeUtxoDustThresholdIn);
 
 public:
     ~Processor();
 
     static std::unique_ptr<Processor>
     MakeProcessor(const ArgsManager &argsman, interfaces::Chain &chain,
                   CConnman *connman, ChainstateManager &chainman,
                   CTxMemPool *mempoolIn, CScheduler &scheduler,
                   bilingual_str &error);
 
-    bool addToReconcile(const AnyVoteItem &item);
+    bool addToReconcile(const AnyVoteItem &item)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems);
     /**
      * Wrapper around the addToReconcile for proofs that adds back the
      * finalization flag to the peer if it is not polled due to being recently
      * finalized.
      */
-    bool reconcileOrFinalize(const ProofRef &proof);
+    bool reconcileOrFinalize(const ProofRef &proof)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems);
     bool isAccepted(const AnyVoteItem &item) const;
     int getConfidence(const AnyVoteItem &item) const;
 
-    bool isRecentlyFinalized(const uint256 &itemId) const;
-    void clearFinalizedItems();
+    bool isRecentlyFinalized(const uint256 &itemId) const
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems);
+    void clearFinalizedItems() EXCLUSIVE_LOCKS_REQUIRED(!cs_finalizedItems);
 
     // TODO: Refactor the API to remove the dependency on avalanche/protocol.h
     void sendResponse(CNode *pfrom, Response response) const;
     bool registerVotes(NodeId nodeid, const Response &response,
                        std::vector<VoteItemUpdate> &updates, int &banscore,
-                       std::string &error);
+                       std::string &error)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems,
+                                 !cs_invalidatedBlocks, !cs_finalizationTip);
 
-    template <typename Callable> auto withPeerManager(Callable &&func) const {
+    template <typename Callable>
+    auto withPeerManager(Callable &&func) const
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager) {
         LOCK(cs_peerManager);
         return func(*peerManager);
     }
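
    // [Illustration only, not part of this patch] Typical call shape for the
    // helper above; the callable and its body are hypothetical.
    //
    //   auto nodeCount = processor.withPeerManager(
    //       [](const avalanche::PeerManager &pm) { return pm.getNodeCount(); });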
 
     CPubKey getSessionPubKey() const;
     /**
      * @brief Send an avahello message
      *
      * @param pfrom The node to send the message to
      * @return True if a non-null delegation has been announced
      */
-    bool sendHello(CNode *pfrom);
-    void sendDelayedAvahello();
+    bool sendHello(CNode *pfrom)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds);
+    void sendDelayedAvahello()
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_delayedAvahelloNodeIds);
 
     ProofRef getLocalProof() const;
     ProofRegistrationState getLocalProofRegistrationState() const;
 
     /*
      * Return whether the avalanche service flag should be set.
      */
     bool isAvalancheServiceAvailable() { return !!peerData; }
 
     bool startEventLoop(CScheduler &scheduler);
     bool stopEventLoop();
 
-    void avaproofsSent(NodeId nodeid) LOCKS_EXCLUDED(cs_main);
+    void avaproofsSent(NodeId nodeid) LOCKS_EXCLUDED(cs_main)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager);
     int64_t getAvaproofsNodeCounter() const {
         return avaproofsNodeCounter.load();
     }
-    bool isQuorumEstablished() LOCKS_EXCLUDED(cs_main);
+    bool isQuorumEstablished() LOCKS_EXCLUDED(cs_main)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_stakingRewards);
     bool canShareLocalProof();
 
-    bool computeStakingReward(const CBlockIndex *pindex);
-    bool eraseStakingRewardWinner(const BlockHash &prevBlockHash);
-    void cleanupStakingRewards(const int minHeight);
+    bool computeStakingReward(const CBlockIndex *pindex)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_stakingRewards);
+    bool eraseStakingRewardWinner(const BlockHash &prevBlockHash)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards);
+    void cleanupStakingRewards(const int minHeight)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards);
     bool getStakingRewardWinner(const BlockHash &prevBlockHash,
-                                CScript &winner) const;
-    bool setStakingRewardWinner(const CBlockIndex *pprev,
-                                const CScript &winner);
+                                CScript &winner) const
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards);
+    bool setStakingRewardWinner(const CBlockIndex *pprev, const CScript &winner)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_stakingRewards);
 
     // Implement NetEventsInterface. Only FinalizeNode is of interest.
     void InitializeNode(const ::Config &config, CNode &pnode,
                         ServiceFlags our_services) override {}
     bool ProcessMessages(const ::Config &config, CNode *pnode,
                          std::atomic<bool> &interrupt) override {
         return false;
     }
     bool SendMessages(const ::Config &config, CNode *pnode) override {
         return false;
     }
 
     /** Handle removal of a node */
-    void FinalizeNode(const ::Config &config, const CNode &node) override
-        LOCKS_EXCLUDED(cs_main);
+    void FinalizeNode(const ::Config &config,
+                      const CNode &node) override LOCKS_EXCLUDED(cs_main)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_delayedAvahelloNodeIds);
 
 private:
-    void updatedBlockTip();
-    void runEventLoop();
-    void clearTimedoutRequests();
-    std::vector<CInv> getInvsForNextPoll(bool forPoll = true);
+    void updatedBlockTip()
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems);
+    void runEventLoop()
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_stakingRewards,
+                                 !cs_finalizedItems);
+    void clearTimedoutRequests() EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager);
+    std::vector<CInv> getInvsForNextPoll(bool forPoll = true)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems);
     bool sendHelloInternal(CNode *pfrom)
         EXCLUSIVE_LOCKS_REQUIRED(cs_delayedAvahelloNodeIds);
-    AnyVoteItem getVoteItemFromInv(const CInv &inv) const;
+    AnyVoteItem getVoteItemFromInv(const CInv &inv) const
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager);
 
     /**
      * We don't need many blocks but a low false positive rate.
      * In the event of a false positive the node might skip polling this block.
      * Such a block will not get marked as finalized until it is reconsidered
      * for polling (if the filter changed its state) or another block is found.
      */
     mutable Mutex cs_invalidatedBlocks;
     CRollingBloomFilter invalidatedBlocks GUARDED_BY(cs_invalidatedBlocks){
         100, 0.0000001};
 
     /**
      * Rolling bloom filter to track recently finalized inventory items of any
      * type. Once placed in this filter, those items will not be polled again
      * unless they roll out. Note that this one filter tracks all types so
      * blocks may be rolled out by transaction activity for example.
      *
      * We want a low false positive rate to prevent accidentally not polling
      * for an item when it is first seen.
      */
     mutable Mutex cs_finalizedItems;
     CRollingBloomFilter finalizedItems GUARDED_BY(cs_finalizedItems){
         AVALANCHE_FINALIZED_ITEMS_FILTER_NUM_ELEMENTS, 0.0000001};
 
     struct IsWorthPolling {
         const Processor &processor;
 
         IsWorthPolling(const Processor &_processor) : processor(_processor){};
 
         bool operator()(const CBlockIndex *pindex) const
             LOCKS_EXCLUDED(cs_main);
         bool operator()(const ProofRef &proof) const
             LOCKS_EXCLUDED(cs_peerManager);
         bool operator()(const CTransactionRef &tx) const;
     };
-    bool isWorthPolling(const AnyVoteItem &item) const;
+    bool isWorthPolling(const AnyVoteItem &item) const
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_peerManager, !cs_finalizedItems);
 
     struct GetLocalAcceptance {
         const Processor &processor;
 
         GetLocalAcceptance(const Processor &_processor)
             : processor(_processor){};
 
         bool operator()(const CBlockIndex *pindex) const
             LOCKS_EXCLUDED(cs_main);
         bool operator()(const ProofRef &proof) const
             LOCKS_EXCLUDED(cs_peerManager);
         bool operator()(const CTransactionRef &tx) const;
     };
     bool getLocalAcceptance(const AnyVoteItem &item) const {
         return std::visit(GetLocalAcceptance(*this), item);
     }
 
     friend struct ::avalanche::AvalancheTest;
 };
 
 } // namespace avalanche
 
 #endif // BITCOIN_AVALANCHE_PROCESSOR_H
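
The annotations added in this header follow Clang's negative-capability
pattern (-Wthread-safety-negative): a function that takes a non-recursive
mutex internally requires that its callers do not already hold it. A minimal
sketch of the pattern, using the sync.h macros this codebase already provides:

    class NegativeCapabilityExample {
        Mutex m_mutex;
        int m_value GUARDED_BY(m_mutex){0};

    public:
        // The caller promises it does NOT hold m_mutex, so taking the lock
        // here is provably safe against self-deadlock.
        int get() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex) {
            LOCK(m_mutex);
            return m_value;
        }
    };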
diff --git a/src/eventloop.h b/src/eventloop.h
index 1faaacc0b..ca73fdb1f 100644
--- a/src/eventloop.h
+++ b/src/eventloop.h
@@ -1,38 +1,40 @@
 // Copyright (c) 2020 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_EVENTLOOP_H
 #define BITCOIN_EVENTLOOP_H
 
 #include <sync.h>
+#include <threadsafety.h>
 
 #include <atomic>
 #include <chrono>
 #include <condition_variable>
 #include <functional>
 
 class CScheduler;
 
 struct EventLoop {
 public:
     EventLoop() {}
     ~EventLoop();
 
     bool startEventLoop(CScheduler &scheduler,
                         std::function<void()> runEventLoop,
-                        std::chrono::milliseconds delta);
-    bool stopEventLoop();
+                        std::chrono::milliseconds delta)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_running);
+    bool stopEventLoop() EXCLUSIVE_LOCKS_REQUIRED(!cs_running);
 
 private:
     /**
      * Start stop machinery.
      */
     std::atomic<bool> stopRequest{false};
     bool running GUARDED_BY(cs_running) = false;
 
     Mutex cs_running;
     std::condition_variable cond_running;
 };
 
 #endif // BITCOIN_EVENTLOOP_H
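
For reference, a sketch of how a caller such as Processor::startEventLoop is
expected to drive this helper while satisfying the new !cs_running requirement;
the 10 ms interval below is a placeholder, not necessarily the constant the
real caller uses:

    bool Processor::startEventLoop(CScheduler &scheduler) {
        return eventLoop.startEventLoop(
            scheduler, [this]() { runEventLoop(); },
            std::chrono::milliseconds(10));
    }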
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index ebf5f5b77..a84d06665 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1,7638 +1,7640 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <net_processing.h>
 
 #include <addrman.h>
 #include <avalanche/avalanche.h>
 #include <avalanche/compactproofs.h>
 #include <avalanche/peermanager.h>
 #include <avalanche/processor.h>
 #include <avalanche/proof.h>
 #include <avalanche/statistics.h>
 #include <avalanche/validation.h>
 #include <banman.h>
 #include <blockencodings.h>
 #include <blockfilter.h>
 #include <blockvalidity.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <config.h>
 #include <consensus/amount.h>
 #include <consensus/validation.h>
 #include <hash.h>
 #include <index/blockfilterindex.h>
 #include <invrequest.h>
 #include <merkleblock.h>
 #include <netbase.h>
 #include <netmessagemaker.h>
 #include <node/blockstorage.h>
 #include <policy/fees.h>
 #include <policy/policy.h>
 #include <primitives/block.h>
 #include <primitives/transaction.h>
 #include <random.h>
 #include <reverse_iterator.h>
 #include <scheduler.h>
 #include <streams.h>
 #include <tinyformat.h>
 #include <txmempool.h>
 #include <txorphanage.h>
 #include <util/check.h> // For NDEBUG compile time check
 #include <util/strencodings.h>
 #include <util/system.h>
 #include <util/trace.h>
 #include <validation.h>
 
 #include <algorithm>
 #include <atomic>
 #include <chrono>
 #include <functional>
 #include <future>
 #include <memory>
 #include <typeinfo>
 
 using node::fImporting;
 using node::fPruneMode;
 using node::fReindex;
 using node::ReadBlockFromDisk;
 
 /** How long to cache transactions in mapRelay for normal relay */
 static constexpr auto RELAY_TX_CACHE_TIME = 15min;
 /**
  * How long a transaction has to be in the mempool before it can
  * unconditionally be relayed (even when not in mapRelay).
  */
 static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
 /**
  * Headers download timeout.
  * Timeout = base + per_header * (expected number of headers)
  */
 static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
 static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
 /**
  * Protect at least this many outbound peers from disconnection due to
  * slow/behind headers chain.
  */
 static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
 /** Timeout for (unprotected) outbound peers to sync to our chainwork */
 static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
 /** How frequently to check for stale tips */
 static constexpr auto STALE_CHECK_INTERVAL{10min};
 /** How frequently to check for extra outbound peers and disconnect. */
 static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
 /**
  * Minimum time an outbound-peer-eviction candidate must be connected for, in
  * order to evict
  */
 static constexpr auto MINIMUM_CONNECT_TIME{30s};
 /** SHA256("main address relay")[0:8] */
 static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
 /// Age after which a stale block will no longer be served if requested as
 /// protection against fingerprinting. Set to one month, denominated in seconds.
 static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
 /// Age after which a block is considered historical for purposes of rate
 /// limiting block relay. Set to one week, denominated in seconds.
 static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
 /**
  * Time between pings automatically sent out for latency probing and keepalive.
  */
 static constexpr auto PING_INTERVAL{2min};
 /** The maximum number of entries in a locator */
 static const unsigned int MAX_LOCATOR_SZ = 101;
 /** The maximum number of entries in an 'inv' protocol message */
 static const unsigned int MAX_INV_SZ = 50000;
 static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv),
               "Max protocol message length must be greater than largest "
               "possible INV message");
 
 /** Minimum time between 2 successive getavaaddr messages from the same peer */
 static constexpr auto GETAVAADDR_INTERVAL{2min};
 
 /**
  * If no proof was requested from a compact proof message after this timeout
  * expired, the proof radix tree can be cleaned up.
  */
 static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT{2min};
 
 struct DataRequestParameters {
     /**
      * Maximum number of in-flight data requests from a peer. It is not a hard
      * limit, but the threshold at which the overloaded_peer_delay kicks in.
      */
     const size_t max_peer_request_in_flight;
 
     /**
      * Maximum number of inventories to consider for requesting, per peer. It
      * provides a reasonable DoS limit to per-peer memory usage spent on
      * announcements, while covering peers continuously sending INVs at the
      * maximum rate (by our own policy, see INVENTORY_BROADCAST_PER_SECOND) for
      * several minutes, while not receiving the actual data (from any peer) in
      * response to requests for them.
      */
     const size_t max_peer_announcements;
 
     /** How long to delay requesting data from non-preferred peers */
     const std::chrono::seconds nonpref_peer_delay;
 
     /**
      * How long to delay requesting data from overloaded peers (see
      * max_peer_request_in_flight).
      */
     const std::chrono::seconds overloaded_peer_delay;
 
     /**
      * How long to wait (in microseconds) before a data request from an
      * additional peer.
      */
     const std::chrono::microseconds getdata_interval;
 
     /**
      * Permission flags a peer requires to bypass the request limits tracking
      * and delay penalty.
      */
     const NetPermissionFlags bypass_request_limits_permissions;
 };
 
 static constexpr DataRequestParameters TX_REQUEST_PARAMS{
     100,                       // max_peer_request_in_flight
     5000,                      // max_peer_announcements
     std::chrono::seconds(2),   // nonpref_peer_delay
     std::chrono::seconds(2),   // overloaded_peer_delay
     std::chrono::seconds(60),  // getdata_interval
     NetPermissionFlags::Relay, // bypass_request_limits_permissions
 };
 
 static constexpr DataRequestParameters PROOF_REQUEST_PARAMS{
     100,                      // max_peer_request_in_flight
     5000,                     // max_peer_announcements
     std::chrono::seconds(2),  // nonpref_peer_delay
     std::chrono::seconds(2),  // overloaded_peer_delay
     std::chrono::seconds(60), // getdata_interval
     NetPermissionFlags::
         BypassProofRequestLimits, // bypass_request_limits_permissions
 };
 
 /**
  * Limit to avoid sending big packets. Not used in processing incoming GETDATA
  * for compatibility.
  */
 static const unsigned int MAX_GETDATA_SZ = 1000;
 /**
  * Number of blocks that can be requested at any given time from a single peer.
  */
 static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
 /**
  * Time during which a peer must stall block download progress before being
  * disconnected.
  */
 static constexpr auto BLOCK_STALLING_TIMEOUT{2s};
 /**
  * Number of headers sent in one getheaders result. We rely on the assumption
  * that if a peer sends fewer than this number, we have reached its tip.
  * Changing this value is a protocol upgrade.
  */
 static const unsigned int MAX_HEADERS_RESULTS = 2000;
 /**
  * Maximum depth of blocks we're willing to serve as compact blocks to peers
  *  when requested. For older blocks, a regular BLOCK response will be sent.
  */
 static const int MAX_CMPCTBLOCK_DEPTH = 5;
 /**
  * Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests
  * for.
  */
 static const int MAX_BLOCKTXN_DEPTH = 10;
 /**
  * Size of the "block download window": how far ahead of our current height do
  * we fetch? Larger windows tolerate larger download speed differences between
  * peers, but increase the potential degree of disordering of blocks on disk
  * (which makes reindexing and pruning harder). We'll probably want to make
  * this a per-peer adaptive value at some point.
  */
 static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
 /**
  * Block download timeout base, expressed in multiples of the block interval
  * (i.e. 10 min)
  */
 static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
 /**
  * Additional block download timeout per parallel downloading peer (i.e. 5 min)
  */
 static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
 /**
  * Maximum number of headers to announce when relaying blocks with headers
  * message.
  */
 static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
 /** Maximum number of unconnecting headers announcements before DoS score */
 static const int MAX_UNCONNECTING_HEADERS = 10;
 /** Minimum blocks required to signal NODE_NETWORK_LIMITED */
 static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
 /**
  * Average delay between local address broadcasts.
  */
 static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
 /**
  * Average delay between peer address broadcasts.
  */
 static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
 /** Delay between rotating the peers we relay a particular address to */
 static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
 /**
  * Average delay between trickled inventory transmissions for inbound peers.
  * Blocks and peers with NetPermissionFlags::NoBan permission bypass this.
  */
 static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
 /**
  * Maximum rate of inventory items to send per second.
  * Limits the impact of low-fee transaction floods.
  */
 static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
 /** Maximum number of inventory items to send per transmission. */
 static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
     INVENTORY_BROADCAST_PER_SECOND *
     count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
 /** The number of most recently announced transactions a peer can request. */
 static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
 /**
  * Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything
  * typically relayed before unconditional relay from the mempool kicks in. This
  * is only a lower bound, and it should be larger to account for higher inv rate
  * to outbound peers, and random variations in the broadcast mechanism.
  */
 static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND *
                                                 UNCONDITIONAL_RELAY_DELAY /
                                                 std::chrono::seconds{1},
               "INVENTORY_RELAY_MAX too low");
 
 /**
  * Average delay between feefilter broadcasts
  */
 static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
 /**
  * Maximum feefilter broadcast delay after significant change.
  */
 static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
 /**
  * Maximum number of compact filters that may be requested with one
  * getcfilters. See BIP 157.
  */
 static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
 /**
  * Maximum number of cf hashes that may be requested with one getcfheaders. See
  * BIP 157.
  */
 static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
 /**
  * The maximum percentage of addresses from our addrman to return in response
  * to a getaddr message.
  */
 static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
 /** The maximum number of address records permitted in an ADDR message. */
 static constexpr size_t MAX_ADDR_TO_SEND{1000};
 /**
  * The maximum rate of address records we're willing to process on average. Can
  * be bypassed using the NetPermissionFlags::Addr permission.
  */
 static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
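// At 0.1 addr/s this works out to roughly 8640 addresses per day per peer.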
 /**
  * The soft limit of the address processing token bucket (the regular
  * MAX_ADDR_RATE_PER_SECOND based increments won't go above this, but the
  * MAX_ADDR_TO_SEND increment following GETADDR is exempt from this limit).
  */
 static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND};
 /** The compactblocks version we support. See BIP 152. */
 static constexpr uint64_t CMPCTBLOCKS_VERSION{1};
 
 inline size_t GetMaxAddrToSend() {
     return gArgs.GetIntArg("-maxaddrtosend", MAX_ADDR_TO_SEND);
 }
 
 // Internal stuff
 namespace {
 /**
  * Blocks that are in flight, and that are in the queue to be downloaded.
  */
 struct QueuedBlock {
     /**
      * BlockIndex. We must have this since we only request blocks when we've
      * already validated the header.
      */
     const CBlockIndex *pindex;
     /** Optional, used for CMPCTBLOCK downloads */
     std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
 };
 
 /**
  * Data structure for an individual peer. This struct is not protected by
  * cs_main since it does not contain validation-critical data.
  *
  * Memory is owned by shared pointers and this object is destructed when
  * the refcount drops to zero.
  *
  * Mutexes inside this struct must not be held when locking m_peer_mutex.
  *
  * TODO: move most members from CNodeState to this structure.
  * TODO: move remaining application-layer data members from CNode to this
  * structure.
  */
 struct Peer {
     /** Same id as the CNode object for this peer */
     const NodeId m_id{0};
 
     /**
      * Services we offered to this peer.
      *
      * This is supplied by CConnman during peer initialization. It's const
      * because there is no protocol defined for renegotiating services
      * initially offered to a peer. The set of local services we offer should
      * not change after initialization.
      *
      * An interesting example of this is NODE_NETWORK and initial block
      * download: a node which starts up from scratch doesn't have any blocks
      * to serve, but still advertises NODE_NETWORK because it will eventually
      * fulfill this role after IBD completes. P2P code is written in such a
      * way that it can gracefully handle peers who don't make good on their
      * service advertisements.
      */
     const ServiceFlags m_our_services;
 
     /** Services this peer offered to us. */
     std::atomic<ServiceFlags> m_their_services{NODE_NONE};
 
     /** Protects misbehavior data members */
     Mutex m_misbehavior_mutex;
     /** Accumulated misbehavior score for this peer */
     int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
     /** Whether this peer should be disconnected and marked as discouraged
      * (unless it has NetPermissionFlags::NoBan permission). */
     bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
 
     /** Protects block inventory data members */
     Mutex m_block_inv_mutex;
     /**
      * List of blocks that we'll announce via an `inv` message.
      * There is no final sorting before sending, as they are always sent
      * immediately and in the order requested.
      */
     std::vector<BlockHash> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
     /**
      * Unfiltered list of blocks that we'd like to announce via a `headers`
      * message. If we can't announce via a `headers` message, we'll fall back to
      * announcing via `inv`.
      */
     std::vector<BlockHash>
         m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
 
     /**
      * The final block hash that we sent in an `inv` message to this peer.
      * When the peer requests this block, we send an `inv` message to trigger
      * the peer to request the next sequence of block hashes.
      * Most peers use headers-first syncing, which doesn't use this mechanism
      */
     BlockHash m_continuation_block GUARDED_BY(m_block_inv_mutex){};
 
     /** This peer's reported block height when we connected */
     std::atomic<int> m_starting_height{-1};
 
     /** The pong reply we're expecting, or 0 if no pong expected. */
     std::atomic<uint64_t> m_ping_nonce_sent{0};
     /** When the last ping was sent, or 0 if no ping was ever sent */
     std::atomic<std::chrono::microseconds> m_ping_start{0us};
     /** Whether a ping has been requested by the user */
     std::atomic<bool> m_ping_queued{false};
 
     /**
      * The feerate in the most recent BIP133 `feefilter` message sent to the
      * peer.
      * It is *not* a p2p protocol violation for the peer to send us
      * transactions with a lower fee rate than this. See BIP133.
      */
     Amount m_fee_filter_sent{Amount::zero()};
     std::chrono::microseconds m_next_send_feefilter{0};
 
     struct TxRelay {
         mutable RecursiveMutex m_bloom_filter_mutex;
         /**
          * Whether the peer wishes to receive transaction announcements.
          *
          * This is initially set based on the fRelay flag in the received
          * `version` message. If initially set to false, it can only be flipped
          * to true if we have offered the peer NODE_BLOOM services and it sends
          * us a `filterload` or `filterclear` message. See BIP37.
          */
         bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
         /**
          * A bloom filter for which transactions to announce to the peer.
          * See BIP37.
          */
         std::unique_ptr<CBloomFilter>
             m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex)
                 GUARDED_BY(m_bloom_filter_mutex){nullptr};
 
         mutable RecursiveMutex m_tx_inventory_mutex;
         /**
          * A filter of all the txids that the peer has announced to us or we
          * have announced to the peer. We use this to avoid announcing
          * the same txid to a peer that already has the transaction.
          */
         CRollingBloomFilter m_tx_inventory_known_filter
             GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
         /**
          * Set of transaction ids we still have to announce. We use the
          * mempool to sort transactions in dependency order before relay, so
          * this does not have to be sorted.
          */
         std::set<TxId> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
         /**
          * Whether the peer has requested us to send our complete mempool. Only
          * permitted if the peer has NetPermissionFlags::Mempool.
          * See BIP35.
          */
         bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
         /** The last time a BIP35 `mempool` request was serviced. */
         std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
         /**
          * The next time after which we will send an `inv` message containing
          * transaction announcements to this peer.
          */
         std::chrono::microseconds m_next_inv_send_time{0};
 
         /**
          * Minimum fee rate with which to filter transaction announcements to
          * this node. See BIP133.
          */
         std::atomic<Amount> m_fee_filter_received{Amount::zero()};
     };
 
     /*
      * Initializes a TxRelay struct for this peer. Can be called at most once
      * for a peer.
      */
     TxRelay *SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
         LOCK(m_tx_relay_mutex);
         Assume(!m_tx_relay);
         m_tx_relay = std::make_unique<Peer::TxRelay>();
         return m_tx_relay.get();
     };
 
     TxRelay *GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
         return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
     };
 
     struct ProofRelay {
         mutable RecursiveMutex m_proof_inventory_mutex;
         std::set<avalanche::ProofId>
             m_proof_inventory_to_send GUARDED_BY(m_proof_inventory_mutex);
         // Prevent sending proof invs if the peer already knows about them
         CRollingBloomFilter m_proof_inventory_known_filter
             GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
         std::chrono::microseconds m_next_inv_send_time{0};
 
         RadixTree<const avalanche::Proof, avalanche::ProofRadixTreeAdapter>
             sharedProofs;
         std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
         std::atomic<bool> compactproofs_requested{false};
     };
 
     /**
      * Proof relay data. Will be a nullptr if we're not relaying
      * proofs with this peer
      */
     const std::unique_ptr<ProofRelay> m_proof_relay;
 
     /**
      * A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND.
      */
     std::vector<CAddress> m_addrs_to_send;
     /**
      * Probabilistic filter to track recent addr messages relayed with this
      * peer. Used to avoid relaying redundant addresses to this peer.
      *
      *  We initialize this filter for outbound peers (other than
      *  block-relay-only connections) or when an inbound peer sends us an
      *  address related message (ADDR, ADDRV2, GETADDR).
      *
      *  Presence of this filter must correlate with m_addr_relay_enabled.
      **/
     std::unique_ptr<CRollingBloomFilter> m_addr_known;
     /**
      * Whether we are participating in address relay with this connection.
      *
      * We set this bool to true for outbound peers (other than
      * block-relay-only connections), or when an inbound peer sends us an
      * address related message (ADDR, ADDRV2, GETADDR).
      *
      * We use this bool to decide whether a peer is eligible for gossiping
      * addr messages. This avoids relaying to peers that are unlikely to
      * forward them, effectively blackholing self announcements. Reasons a
      * peer might not support addr relay on the link include that it connected
      * to us as a block-relay-only peer or that it is a light client.
      *
      * This field must correlate with whether m_addr_known has been
      * initialized.
      */
     std::atomic_bool m_addr_relay_enabled{false};
     /** Whether a getaddr request to this peer is outstanding. */
     bool m_getaddr_sent{false};
     /** Guards address sending timers. */
     mutable Mutex m_addr_send_times_mutex;
     /** Time point to send the next ADDR message to this peer. */
     std::chrono::microseconds
         m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
     /** Time point to possibly re-announce our local address to this peer. */
     std::chrono::microseconds
         m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
     /**
      * Whether the peer has signaled support for receiving ADDRv2 (BIP155)
      * messages, indicating a preference to receive ADDRv2 instead of ADDR ones.
      */
     std::atomic_bool m_wants_addrv2{false};
     /** Whether this peer has already sent us a getaddr message. */
     bool m_getaddr_recvd{false};
     /** Guards m_addr_token_bucket */
     mutable Mutex m_addr_token_bucket_mutex;
     /**
      * Number of addresses that can be processed from this peer. Start at 1
      * to permit self-announcement.
      */
     double m_addr_token_bucket GUARDED_BY(m_addr_token_bucket_mutex){1.0};
     /** When m_addr_token_bucket was last updated */
     std::chrono::microseconds m_addr_token_timestamp{
         GetTime<std::chrono::microseconds>()};
     /** Total number of addresses that were dropped due to rate limiting. */
     std::atomic<uint64_t> m_addr_rate_limited{0};
     /**
      * Total number of addresses that were processed (excludes rate-limited
      * ones).
      */
     std::atomic<uint64_t> m_addr_processed{0};
 
     /**
      * Set of txids to reconsider once their parent transactions have been
      * accepted
      */
     std::set<TxId> m_orphan_work_set GUARDED_BY(g_cs_orphans);
 
     /** Protects m_getdata_requests **/
     Mutex m_getdata_requests_mutex;
     /** Work queue of items requested by this peer **/
     std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
 
     explicit Peer(NodeId id, ServiceFlags our_services)
         : m_id(id), m_our_services{our_services},
           m_proof_relay(isAvalancheEnabled(gArgs)
                             ? std::make_unique<ProofRelay>()
                             : nullptr) {}
 
 private:
     Mutex m_tx_relay_mutex;
 
     /**
      * Transaction relay data. Will be a nullptr if we're not relaying
      * transactions with this peer (e.g. if it's a block-relay-only peer or
      * the peer has sent us fRelay=false with bloom filters disabled).
      */
     std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
 };
 
 using PeerRef = std::shared_ptr<Peer>;
 
 /**
  * Maintain validation-specific state about nodes, protected by cs_main, instead
  * of by CNode's own locks. This simplifies asynchronous operation, where
  * processing of incoming data is done after the ProcessMessage call returns,
  * and we're no longer holding the node's locks.
  */
 struct CNodeState {
     //! The best known block we know this peer has announced.
     const CBlockIndex *pindexBestKnownBlock{nullptr};
     //! The hash of the last unknown block this peer has announced.
     BlockHash hashLastUnknownBlock{};
     //! The last full block we both have.
     const CBlockIndex *pindexLastCommonBlock{nullptr};
     //! The best header we have sent our peer.
     const CBlockIndex *pindexBestHeaderSent{nullptr};
     //! Length of the current streak of unconnecting headers announcements
     int nUnconnectingHeaders{0};
     //! Whether we've started headers synchronization with this peer.
     bool fSyncStarted{false};
     //! When to potentially disconnect peer for stalling headers download
     std::chrono::microseconds m_headers_sync_timeout{0us};
     //! Since when we're stalling block download progress (in microseconds), or
     //! 0.
     std::chrono::microseconds m_stalling_since{0us};
     std::list<QueuedBlock> vBlocksInFlight;
     //! When the first entry in vBlocksInFlight started downloading. Don't care
     //! when vBlocksInFlight is empty.
     std::chrono::microseconds m_downloading_since{0us};
     int nBlocksInFlight{0};
     //! Whether we consider this a preferred download peer.
     bool fPreferredDownload{false};
     //! Whether this peer wants invs or headers (when possible) for block
     //! announcements.
     bool fPreferHeaders{false};
     /**
      * Whether this peer wants invs or cmpctblocks (when possible) for block
      * announcements.
      */
     bool m_requested_hb_cmpctblocks{false};
     /** Whether this peer will send us cmpctblocks if we request them. */
     bool m_provides_cmpctblocks{false};
 
     /**
      * State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL
      * logic.
      *
      * Both are only in effect for outbound, non-manual, non-protected
      * connections. Any peer protected (m_protect = true) is not chosen for
      * eviction. A peer is marked as protected if all of these are true:
      *   - its connection type is IsBlockOnlyConn() == false
      *   - it gave us a valid connecting header
      *   - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet
      *   - it has a better chain than we have
      *
      * CHAIN_SYNC_TIMEOUT:  if a peer's best known block has less work than our
      * tip, set a timeout CHAIN_SYNC_TIMEOUT in the future:
      *   - If at timeout their best known block now has more work than our tip
      * when the timeout was set, then either reset the timeout or clear it
      * (after comparing against our current tip's work)
      *   - If at timeout their best known block still has less work than our tip
      * did when the timeout was set, then send a getheaders message, and set a
      * shorter timeout, HEADERS_RESPONSE_TIME seconds in future. If their best
      * known block is still behind when that new timeout is reached, disconnect.
      *
      * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many
      * outbound peers, drop the outbound one that least recently announced us a
      * new block.
      */
     struct ChainSyncTimeoutState {
         //! A timeout used for checking whether our peer has sufficiently
         //! synced.
         std::chrono::seconds m_timeout{0s};
         //! A header with the work we require on our peer's chain.
         const CBlockIndex *m_work_header{nullptr};
         //! After timeout is reached, set to true after sending getheaders.
         bool m_sent_getheaders{false};
         //! Whether this peer is protected from disconnection due to a bad/slow
         //! chain.
         bool m_protect{false};
     };
 
     ChainSyncTimeoutState m_chain_sync;
 
     //! Time of last new block announcement
     int64_t m_last_block_announcement{0};
 
     //! Whether this peer is an inbound connection
     const bool m_is_inbound;
 
     //! A rolling bloom filter of all announced tx CInvs to this peer.
     CRollingBloomFilter m_recently_announced_invs =
         CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};
 
     //! A rolling bloom filter of all announced Proofs CInvs to this peer.
     CRollingBloomFilter m_recently_announced_proofs =
         CRollingBloomFilter{INVENTORY_MAX_RECENT_RELAY, 0.000001};
 
     CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
 };
 
 class PeerManagerImpl final : public PeerManager {
 public:
     PeerManagerImpl(CConnman &connman, AddrMan &addrman, BanMan *banman,
                     ChainstateManager &chainman, CTxMemPool &pool,
                     bool ignore_incoming_txs);
 
     /** Overridden from CValidationInterface. */
     void BlockConnected(const std::shared_ptr<const CBlock> &pblock,
                         const CBlockIndex *pindexConnected) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
     void BlockDisconnected(const std::shared_ptr<const CBlock> &block,
                            const CBlockIndex *pindex) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
     void UpdatedBlockTip(const CBlockIndex *pindexNew,
                          const CBlockIndex *pindexFork,
                          bool fInitialDownload) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void BlockChecked(const CBlock &block,
                       const BlockValidationState &state) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void NewPoWValidBlock(const CBlockIndex *pindex,
                           const std::shared_ptr<const CBlock> &pblock) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
 
     /** Implement NetEventsInterface */
     void InitializeNode(const Config &config, CNode &node,
                         ServiceFlags our_services) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void FinalizeNode(const Config &config, const CNode &node) override
-        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest);
     bool ProcessMessages(const Config &config, CNode *pfrom,
                          std::atomic<bool> &interrupt) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                  !m_recent_confirmed_transactions_mutex,
-                                 !m_most_recent_block_mutex);
+                                 !m_most_recent_block_mutex, !cs_proofrequest);
     bool SendMessages(const Config &config, CNode *pto) override
         EXCLUSIVE_LOCKS_REQUIRED(pto->cs_sendProcessing)
             EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                      !m_recent_confirmed_transactions_mutex,
-                                     !m_most_recent_block_mutex);
+                                     !m_most_recent_block_mutex,
+                                     !cs_proofrequest);
 
     /** Implement PeerManager */
     void StartScheduledTasks(CScheduler &scheduler) override;
     void CheckForStaleTipAndEvictPeers() override;
     std::optional<std::string>
     FetchBlock(const Config &config, NodeId peer_id,
                const CBlockIndex &block_index) override;
     bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
     void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void RelayTransaction(const TxId &txid) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void RelayProof(const avalanche::ProofId &proofid) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void SetBestHeight(int height) override { m_best_height = height; };
     void Misbehaving(const NodeId pnode, const int howmuch,
                      const std::string &message) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void ProcessMessage(const Config &config, CNode &pfrom,
                         const std::string &msg_type, CDataStream &vRecv,
                         const std::chrono::microseconds time_received,
                         const std::atomic<bool> &interruptMsgProc) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                  !m_recent_confirmed_transactions_mutex,
-                                 !m_most_recent_block_mutex);
+                                 !m_most_recent_block_mutex, !cs_proofrequest);
     void UpdateLastBlockAnnounceTime(NodeId node,
                                      int64_t time_in_seconds) override;
 
 private:
     /**
      * Consider evicting an outbound peer based on the amount of time they've
      * been behind our tip.
      */
     void ConsiderEviction(CNode &pto, std::chrono::seconds time_in_seconds)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /**
      * If we have extra outbound peers, try to disconnect the one with the
      * oldest block announcement.
      */
     void EvictExtraOutboundPeers(std::chrono::seconds now)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Retrieve unbroadcast transactions from the mempool and reattempt
      * sending to peers
      */
     void ReattemptInitialBroadcast(CScheduler &scheduler)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Update the avalanche statistics for all the nodes
      */
     void UpdateAvalancheStatistics() const;
 
     /**
      * Process periodic avalanche network messaging and cleanups.
      */
     void AvalanchePeriodicNetworking(CScheduler &scheduler) const;
 
     /**
      * Get a shared pointer to the Peer object.
      * May return an empty shared_ptr if the Peer object can't be found.
      */
     PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Get a shared pointer to the Peer object and remove it from m_peer_map.
      * May return an empty shared_ptr if the Peer object can't be found.
      */
     PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     // overloaded variant of above to operate on CNode*s
     void Misbehaving(const CNode &node, int howmuch, const std::string &message)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) {
         Misbehaving(node.GetId(), howmuch, message);
     }
 
     /**
      * Potentially mark a node discouraged based on the contents of a
      * BlockValidationState object
      *
      * @param[in] via_compact_block this bool is passed in because
      * net_processing should punish peers differently depending on whether the
      * data was provided in a compact block message or not. If the compact block
      * had a valid header, but contained invalid txs, the peer should not be
      * punished. See BIP 152.
      *
      * @return Returns true if the peer was punished (probably disconnected)
      */
     bool MaybePunishNodeForBlock(NodeId nodeid,
                                  const BlockValidationState &state,
                                  bool via_compact_block,
                                  const std::string &message = "")
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Potentially disconnect and discourage a node based on the contents of a
      * TxValidationState object
      *
      * @return Returns true if the peer was punished (probably disconnected)
      */
     bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state,
                               const std::string &message = "")
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Maybe disconnect a peer and discourage future connections from its
      * address.
      *
      * @param[in]   pnode     The node to check.
      * @param[in]   peer      The peer object to check.
      * @return                True if the peer was marked for disconnection in
      * this function
      */
     bool MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer);
 
     void ProcessOrphanTx(const Config &config, std::set<TxId> &orphan_work_set)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans)
             EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     /** Process a single headers message from a peer. */
     void ProcessHeadersMessage(const Config &config, CNode &pfrom,
                                const Peer &peer,
                                const std::vector<CBlockHeader> &headers,
                                bool via_compact_block)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     void SendBlockTransactions(CNode &pfrom, const CBlock &block,
                                const BlockTransactionsRequest &req)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Register with InvRequestTracker that a TX INV has been received from a
      * peer. The announcement parameters are decided in PeerManager and then
      * passed to InvRequestTracker.
      */
     void AddTxAnnouncement(const CNode &node, const TxId &txid,
                            std::chrono::microseconds current_time)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     /**
      * Register with InvRequestTracker that a PROOF INV has been received from a
      * peer. The announcement parameters are decided in PeerManager and then
      * passed to InvRequestTracker.
      */
     void
     AddProofAnnouncement(const CNode &node, const avalanche::ProofId &proofid,
                          std::chrono::microseconds current_time, bool preferred)
         EXCLUSIVE_LOCKS_REQUIRED(cs_proofrequest);
 
     /** Send a version message to a peer */
     void PushNodeVersion(const Config &config, CNode &pnode, const Peer &peer);
 
     /**
      * Send a ping message every PING_INTERVAL or if requested via RPC. May mark
      * the peer to be disconnected if a ping has timed out.
      * We use mockable time for ping timeouts, so setmocktime may cause pings
      * to time out.
      */
     void MaybeSendPing(CNode &node_to, Peer &peer,
                        std::chrono::microseconds now);
 
     /** Send `addr` messages on a regular schedule. */
     void MaybeSendAddr(CNode &node, Peer &peer,
                        std::chrono::microseconds current_time);
 
     /** Send `feefilter` message. */
     void MaybeSendFeefilter(CNode &node, Peer &peer,
                             std::chrono::microseconds current_time);
 
     /**
      * Relay (gossip) an address to a few randomly chosen nodes.
      *
      * @param[in] originator   The id of the peer that sent us the address. We
      *                         don't want to relay it back.
      * @param[in] addr         Address to relay.
      * @param[in] fReachable   Whether the address' network is reachable. We
      *                         relay unreachable addresses less.
      */
     void RelayAddress(NodeId originator, const CAddress &addr, bool fReachable)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     const CChainParams &m_chainparams;
     CConnman &m_connman;
     AddrMan &m_addrman;
     /**
      * Pointer to this node's banman. May be nullptr - check existence before
      * dereferencing.
      */
     BanMan *const m_banman;
     ChainstateManager &m_chainman;
     CTxMemPool &m_mempool;
     InvRequestTracker<TxId> m_txrequest GUARDED_BY(::cs_main);
 
     Mutex cs_proofrequest;
     InvRequestTracker<avalanche::ProofId>
         m_proofrequest GUARDED_BY(cs_proofrequest);
 
     /** The height of the best chain */
     std::atomic<int> m_best_height{-1};
 
     /** Next time to check for stale tip */
     std::chrono::seconds m_stale_tip_check_time{0s};
 
     /** Whether this node is running in blocks only mode */
     const bool m_ignore_incoming_txs;
 
     /**
      * Whether we've completed initial sync yet, for determining when to turn
      * on extra block-relay-only peers.
      */
     bool m_initial_sync_finished{false};
 
     /**
      * Protects m_peer_map. This mutex must not be locked while holding a lock
      * on any of the mutexes inside a Peer object.
      */
     mutable Mutex m_peer_mutex;
     /**
      * Map of all Peer objects, keyed by peer id. This map is protected
      * by the m_peer_mutex. Once a shared pointer reference is
      * taken, the lock may be released. Individual fields are protected by
      * their own locks.
      */
     std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
 
     /** Map maintaining per-node state. */
     std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);
 
     /**
      * Get a pointer to a const CNodeState, used when not mutating the
      * CNodeState object.
      */
     const CNodeState *State(NodeId pnode) const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /** Get a pointer to a mutable CNodeState. */
     CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Next time an `inv` is scheduled to be sent to inbound peers. A single
      * timer is shared by all inbound peers (see NextInvToInbounds).
      */
     std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
 
     /** Number of nodes with fSyncStarted. */
     int nSyncStarted GUARDED_BY(cs_main) = 0;
 
     /**
      * Sources of received blocks, saved to be able to punish them when
      * processing happens afterwards.
      * Set mapBlockSource[hash].second to false if the node should not be
      * punished if the block is invalid.
      */
     std::map<BlockHash, std::pair<NodeId, bool>>
         mapBlockSource GUARDED_BY(cs_main);
 
     /** Number of outbound peers with m_chain_sync.m_protect. */
     int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
 
     /** Number of preferable block download peers. */
     int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
 
     /** Check whether we already know of this txid and should not request it again. */
     bool AlreadyHaveTx(const TxId &txid)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main,
                                  !m_recent_confirmed_transactions_mutex);
 
     /**
      * Filter for transactions that were recently rejected by the mempool.
      * These are not rerequested until the chain tip changes, at which point
      * the entire filter is reset.
      *
      * Without this filter we'd be re-requesting txs from each of our peers,
      * increasing bandwidth consumption considerably. For instance, with 100
      * peers, half of which relay a tx we don't accept, that might be a 50x
      * bandwidth increase. A flooding attacker attempting to roll over the
      * filter using minimum-sized, 60-byte transactions might manage to send
      * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
      * two-minute window to send invs to us.
      *
      * Decreasing the false positive rate is fairly cheap, so we pick one in a
      * million to make it highly unlikely for users to have issues with this
      * filter.
      *
      * Memory used: 1.3 MB
      */
     CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000,
                                                                0.000'001};
     uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
 
     /**
      * Filter for transactions that have been recently confirmed.
      * We use this to avoid requesting transactions that have already been
      * confirmed.
      */
     mutable Mutex m_recent_confirmed_transactions_mutex;
     CRollingBloomFilter m_recent_confirmed_transactions
         GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};
 
     /**
      * For sending `inv`s to inbound peers, we use a single (exponentially
      * distributed) timer for all peers. If we used a separate timer for each
      * peer, a spy node could make multiple inbound connections to us to
      * accurately determine when we received the transaction (and potentially
      * determine the transaction's origin).
      */
     std::chrono::microseconds
     NextInvToInbounds(std::chrono::microseconds now,
                       std::chrono::seconds average_interval);
 
     // All of the following cache a recent block, and are protected by
     // m_most_recent_block_mutex
     Mutex m_most_recent_block_mutex;
     std::shared_ptr<const CBlock>
         m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
     std::shared_ptr<const CBlockHeaderAndShortTxIDs>
         m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
     BlockHash m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
 
     /**
      * Height of the highest block announced using BIP 152 high-bandwidth mode.
      */
     int m_highest_fast_announce{0};
 
     /** Have we requested this block from a peer */
     bool IsBlockRequested(const BlockHash &hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Remove this block from our tracked requested blocks. Called if:
      *  - the block has been received from a peer
      *  - the request for the block has timed out
      */
     void RemoveBlockRequest(const BlockHash &hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Mark a block as in flight.
      * Returns false, still setting pit, if the block was already in flight
      * from the same peer.
      * pit will only be valid as long as the same cs_main lock is being held.
      */
     bool BlockRequested(const Config &config, NodeId nodeid,
                         const CBlockIndex &block,
                         std::list<QueuedBlock>::iterator **pit = nullptr)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Update pindexLastCommonBlock and add not-in-flight missing successors to
      * vBlocks, until it has at most count entries.
      */
     void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
                                   std::vector<const CBlockIndex *> &vBlocks,
                                   NodeId &nodeStaller)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     std::map<BlockHash, std::pair<NodeId, std::list<QueuedBlock>::iterator>>
         mapBlocksInFlight GUARDED_BY(cs_main);
 
     /** When our tip was last updated. */
     std::atomic<std::chrono::seconds> m_last_tip_update{0s};
 
     /**
      * Determine whether or not a peer can request a transaction, and return it
      * (or nullptr if not found or not allowed).
      */
     CTransactionRef FindTxForGetData(const CNode &peer, const TxId &txid,
                                      const std::chrono::seconds mempool_req,
                                      const std::chrono::seconds now)
         LOCKS_EXCLUDED(cs_main);
 
     void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
                         const std::atomic<bool> &interruptMsgProc)
         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex,
                                  peer.m_getdata_requests_mutex)
             LOCKS_EXCLUDED(cs_main);
 
     /** Process a new block. Perform any post-processing housekeeping */
     void ProcessBlock(const Config &config, CNode &node,
                       const std::shared_ptr<const CBlock> &block,
                       bool force_processing);
 
     /** Relay map. */
     typedef std::map<TxId, CTransactionRef> MapRelay;
     MapRelay mapRelay GUARDED_BY(cs_main);
 
     /**
      * Expiration-time ordered list of (expire time, relay map entry) pairs,
      * protected by cs_main.
      */
     std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>>
         g_relay_expiration GUARDED_BY(cs_main);
 
     /**
      * When a peer sends us a valid block, instruct it to announce blocks to us
      * using CMPCTBLOCK if possible by adding its nodeid to the end of
      * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size
      * by removing the first element if necessary.
      */
     void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /** Stack of nodes which we have set to announce using compact blocks */
     std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
 
     /** Number of peers from which we're downloading blocks. */
     int m_peers_downloading_from GUARDED_BY(cs_main) = 0;
 
     /** Storage for orphan information */
     TxOrphanage m_orphanage;
 
     void AddToCompactExtraTransactions(const CTransactionRef &tx)
         EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans);
 
     /**
      * Orphan/conflicted/etc transactions that are kept for compact block
      * reconstruction.
      * The most recent
      * -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of
      * these are kept in a ring buffer.
      */
     std::vector<std::pair<TxHash, CTransactionRef>>
         vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
     /** Offset into vExtraTxnForCompact to insert the next tx */
     size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
 
     /**
      * Check whether the last unknown block a peer advertised is not yet known.
      */
     void ProcessBlockAvailability(NodeId nodeid)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /**
      * Update tracking information about which blocks a peer is assumed to have.
      */
     void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * To prevent fingerprinting attacks, only send blocks/headers outside of
      * the active chain if they are no more than a month older (both in time,
      * and in best equivalent proof of work) than the best header chain we know
      * about and we fully-validated them at some point.
      */
     bool BlockRequestAllowed(const CBlockIndex *pindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool AlreadyHaveBlock(const BlockHash &block_hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool AlreadyHaveProof(const avalanche::ProofId &proofid);
     void ProcessGetBlockData(const Config &config, CNode &pfrom, Peer &peer,
                              const CInv &inv)
         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
 
     /**
      * Validation logic for compact filters request handling.
      *
      * May disconnect from the peer in the case of a bad request.
      *
      * @param[in]   node            The node that we received the request from
      * @param[in]   peer            The peer that we received the request from
      * @param[in]   filter_type     The filter type the request is for. Must be
      *                              basic filters.
      * @param[in]   start_height    The start height for the request
      * @param[in]   stop_hash       The stop_hash for the request
      * @param[in]   max_height_diff The maximum number of items permitted to
      *                              request, as specified in BIP 157
      * @param[out]  stop_index      The CBlockIndex for the stop_hash block, if
      *                              the request can be serviced.
      * @param[out]  filter_index    The filter index, if the request can be
      *                              serviced.
      * @return                      True if the request can be serviced.
      */
     bool PrepareBlockFilterRequest(CNode &node, Peer &peer,
                                    BlockFilterType filter_type,
                                    uint32_t start_height,
                                    const BlockHash &stop_hash,
                                    uint32_t max_height_diff,
                                    const CBlockIndex *&stop_index,
                                    BlockFilterIndex *&filter_index);
 
     /**
      * Handle a cfilters request.
      *
      * May disconnect from the peer in the case of a bad request.
      *
      * @param[in]   node            The node that we received the request from
      * @param[in]   peer            The peer that we received the request from
      * @param[in]   vRecv           The raw message received
      */
     void ProcessGetCFilters(CNode &node, Peer &peer, CDataStream &vRecv);
     /**
      * Handle a cfheaders request.
      *
      * May disconnect from the peer in the case of a bad request.
      *
      * @param[in]   node            The node that we received the request from
      * @param[in]   peer            The peer that we received the request from
      * @param[in]   vRecv           The raw message received
      */
     void ProcessGetCFHeaders(CNode &node, Peer &peer, CDataStream &vRecv);
     /**
      * Handle a getcfcheckpt request.
      *
      * May disconnect from the peer in the case of a bad request.
      *
      * @param[in]   node            The node that we received the request from
      * @param[in]   peer            The peer that we received the request from
      * @param[in]   vRecv           The raw message received
      */
     void ProcessGetCFCheckPt(CNode &node, Peer &peer, CDataStream &vRecv);
 
     /**
      * Decide a response for an Avalanche poll about the given block.
      *
      * @param[in]   hash            The hash of the block being polled for
      * @return                      Our current vote for the block
      */
     uint32_t GetAvalancheVoteForBlock(const BlockHash &hash) const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Decide a response for an Avalanche poll about the given transaction.
      *
      * @param[in] id       The id of the transaction being polled for
      * @return             Our current vote for the transaction
      */
     uint32_t GetAvalancheVoteForTx(const TxId &id) const
-        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+        EXCLUSIVE_LOCKS_REQUIRED(cs_main,
+                                 !m_recent_confirmed_transactions_mutex);
 
     /**
      * Checks if address relay is permitted with peer. If needed, initializes
      * the m_addr_known bloom filter and sets m_addr_relay_enabled to true.
      *
      *  @return   True if address relay is enabled with peer
      *            False if address relay is disallowed
      */
     bool SetupAddressRelay(const CNode &node, Peer &peer);
 
     /**
      * Manage reception of an avalanche proof.
      *
      * @return   False if the peer is misbehaving, true otherwise
      */
     bool ReceivedAvalancheProof(CNode &node, Peer &peer,
                                 const avalanche::ProofRef &proof)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest);
 
     /**
      * Determine whether or not a peer can request a proof, and return it
      * (or nullptr if not found or not allowed).
      */
     avalanche::ProofRef FindProofForGetData(const CNode &peer,
                                             const avalanche::ProofId &proofid,
                                             const std::chrono::seconds now);
 
     /** Whether we consider this peer a preferred peer for block download. */
     bool isPreferredDownloadPeer(const CNode &pfrom);
 };
 
 const CNodeState *PeerManagerImpl::State(NodeId pnode) const
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
     if (it == m_node_states.end()) {
         return nullptr;
     }
 
     return &it->second;
 }
 
 CNodeState *PeerManagerImpl::State(NodeId pnode)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     return const_cast<CNodeState *>(std::as_const(*this).State(pnode));
 }
 
 /**
  * Whether the peer supports the address. For example, a peer that does not
  * implement BIP155 cannot receive Tor v3 addresses because they require
  * ADDRv2 (BIP155) encoding.
  */
 static bool IsAddrCompatible(const Peer &peer, const CAddress &addr) {
     return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
 }
 
 static void AddAddressKnown(Peer &peer, const CAddress &addr) {
     assert(peer.m_addr_known);
     peer.m_addr_known->insert(addr.GetKey());
 }
 
 static void PushAddress(Peer &peer, const CAddress &addr,
                         FastRandomContext &insecure_rand) {
     // Known checking here is only to save space from duplicates.
     // Before sending, we'll filter it again for known addresses that were
     // added after addresses were pushed.
     assert(peer.m_addr_known);
     if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) &&
         IsAddrCompatible(peer, addr)) {
         if (peer.m_addrs_to_send.size() >= GetMaxAddrToSend()) {
             peer.m_addrs_to_send[insecure_rand.randrange(
                 peer.m_addrs_to_send.size())] = addr;
         } else {
             peer.m_addrs_to_send.push_back(addr);
         }
     }
 }
 
 static void AddKnownTx(Peer &peer, const TxId &txid) {
     auto tx_relay = peer.GetTxRelay();
     if (!tx_relay) {
         return;
     }
 
     LOCK(tx_relay->m_tx_inventory_mutex);
     tx_relay->m_tx_inventory_known_filter.insert(txid);
 }
 
 static void AddKnownProof(Peer &peer, const avalanche::ProofId &proofid) {
     if (peer.m_proof_relay != nullptr) {
         LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
         peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);
     }
 }
 
 bool PeerManagerImpl::isPreferredDownloadPeer(const CNode &pfrom) {
     LOCK(cs_main);
     const CNodeState *state = State(pfrom.GetId());
     return state && state->fPreferredDownload;
 }

 /** Whether this peer can serve us blocks. */
 static bool CanServeBlocks(const Peer &peer) {
     return peer.m_their_services & (NODE_NETWORK | NODE_NETWORK_LIMITED);
 }
 
 /**
  * Whether this peer can only serve limited recent blocks (e.g. because
  * it prunes old blocks)
  */
 static bool IsLimitedPeer(const Peer &peer) {
     return (!(peer.m_their_services & NODE_NETWORK) &&
             (peer.m_their_services & NODE_NETWORK_LIMITED));
 }
 
 std::chrono::microseconds
 PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
                                    std::chrono::seconds average_interval) {
     if (m_next_inv_to_inbounds.load() < now) {
         // If this function were called from multiple threads simultaneously,
         // it would be possible for both to update the next send variable and
         // return different results to their callers. This is not possible in
         // practice as only the net processing thread invokes this function.
         m_next_inv_to_inbounds = GetExponentialRand(now, average_interval);
     }
     return m_next_inv_to_inbounds;
 }
 
 bool PeerManagerImpl::IsBlockRequested(const BlockHash &hash) {
     return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end();
 }
 
 void PeerManagerImpl::RemoveBlockRequest(const BlockHash &hash) {
     auto it = mapBlocksInFlight.find(hash);
 
     if (it == mapBlocksInFlight.end()) {
         // Block was not requested
         return;
     }
 
     auto [node_id, list_it] = it->second;
     CNodeState *state = State(node_id);
     assert(state != nullptr);
 
     if (state->vBlocksInFlight.begin() == list_it) {
         // First block on the queue was received, update the start download time
         // for the next one
         state->m_downloading_since = std::max(
             state->m_downloading_since, GetTime<std::chrono::microseconds>());
     }
     state->vBlocksInFlight.erase(list_it);
 
     state->nBlocksInFlight--;
     if (state->nBlocksInFlight == 0) {
         // Last validated block on the queue was received.
         m_peers_downloading_from--;
     }
     state->m_stalling_since = 0us;
     mapBlocksInFlight.erase(it);
 }
 
 bool PeerManagerImpl::BlockRequested(const Config &config, NodeId nodeid,
                                      const CBlockIndex &block,
                                      std::list<QueuedBlock>::iterator **pit) {
     const BlockHash &hash{block.GetBlockHash()};
 
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Short-circuit most stuff in case it is from the same node.
     std::map<BlockHash,
              std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
         itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end() &&
         itInFlight->second.first == nodeid) {
         if (pit) {
             *pit = &itInFlight->second.second;
         }
         return false;
     }
 
     // Make sure it's not listed somewhere already.
     RemoveBlockRequest(hash);
 
     std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
         state->vBlocksInFlight.end(),
         {&block, std::unique_ptr<PartiallyDownloadedBlock>(
                      pit ? new PartiallyDownloadedBlock(config, &m_mempool)
                          : nullptr)});
     state->nBlocksInFlight++;
     if (state->nBlocksInFlight == 1) {
         // We're starting a block download (batch) from this peer.
         state->m_downloading_since = GetTime<std::chrono::microseconds>();
         m_peers_downloading_from++;
     }
 
     itInFlight = mapBlocksInFlight
                      .insert(std::make_pair(hash, std::make_pair(nodeid, it)))
                      .first;
 
     if (pit) {
         *pit = &itInFlight->second.second;
     }
 
     return true;
 }
 
 void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) {
     AssertLockHeld(cs_main);
 
     // Never request high-bandwidth mode from peers if we're blocks-only. Our
     // mempool will not contain the transactions necessary to reconstruct the
     // compact block.
     if (m_ignore_incoming_txs) {
         return;
     }
 
     CNodeState *nodestate = State(nodeid);
     if (!nodestate) {
         LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
         return;
     }
     if (!nodestate->m_provides_cmpctblocks) {
         return;
     }
     int num_outbound_hb_peers = 0;
     for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
          it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
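         // If this peer is already in the list, move it to the most recently
         // added position so it is not the next to be evicted.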
         if (*it == nodeid) {
             lNodesAnnouncingHeaderAndIDs.erase(it);
             lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
             return;
         }
         CNodeState *state = State(*it);
         if (state != nullptr && !state->m_is_inbound) {
             ++num_outbound_hb_peers;
         }
     }
     if (nodestate->m_is_inbound) {
         // If we're adding an inbound HB peer, make sure we're not removing
         // our last outbound HB peer in the process.
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
             num_outbound_hb_peers == 1) {
             CNodeState *remove_node =
                 State(lNodesAnnouncingHeaderAndIDs.front());
             if (remove_node != nullptr && !remove_node->m_is_inbound) {
                 // Put the HB outbound peer in the second slot, so that it
                 // doesn't get removed.
                 std::swap(lNodesAnnouncingHeaderAndIDs.front(),
                           *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
             }
         }
     }
     m_connman.ForNode(nodeid, [this](CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(
                                   ::cs_main) {
         AssertLockHeld(::cs_main);
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
             // As per BIP152, we only get 3 of our peers to announce
             // blocks using compact encodings.
             m_connman.ForNode(
                 lNodesAnnouncingHeaderAndIDs.front(), [this](CNode *pnodeStop) {
                     m_connman.PushMessage(
                         pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
                                        .Make(NetMsgType::SENDCMPCT,
                                              /*high_bandwidth=*/false,
                                              /*version=*/CMPCTBLOCKS_VERSION));
                     // save BIP152 bandwidth state: we select peer to be
                     // low-bandwidth
                     pnodeStop->m_bip152_highbandwidth_to = false;
                     return true;
                 });
             lNodesAnnouncingHeaderAndIDs.pop_front();
         }
         m_connman.PushMessage(pfrom,
                               CNetMsgMaker(pfrom->GetCommonVersion())
                                   .Make(NetMsgType::SENDCMPCT,
                                         /*high_bandwidth=*/true,
                                         /*version=*/CMPCTBLOCKS_VERSION));
         // save BIP152 bandwidth state: we select peer to be high-bandwidth
         pfrom->m_bip152_highbandwidth_to = true;
         lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
         return true;
     });
 }
 
 bool PeerManagerImpl::TipMayBeStale() {
     AssertLockHeld(cs_main);
     const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
     if (m_last_tip_update.load() == 0s) {
         m_last_tip_update = GetTime<std::chrono::seconds>();
     }
     return m_last_tip_update.load() <
                GetTime<std::chrono::seconds>() -
                    std::chrono::seconds{consensusParams.nPowTargetSpacing *
                                         3} &&
            mapBlocksInFlight.empty();
 }
 
 bool PeerManagerImpl::CanDirectFetch() {
     return m_chainman.ActiveChain().Tip()->GetBlockTime() >
            GetAdjustedTime() -
                m_chainparams.GetConsensus().nPowTargetSpacing * 20;
 }
 
 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (state->pindexBestKnownBlock &&
         pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
         return true;
     }
     if (state->pindexBestHeaderSent &&
         pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
         return true;
     }
     return false;
 }
 
 void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     if (!state->hashLastUnknownBlock.IsNull()) {
         const CBlockIndex *pindex =
             m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
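         // If the last unknown announced block is now in our index, consider
         // promoting it to pindexBestKnownBlock and clear the unknown marker.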
         if (pindex && pindex->nChainWork > 0) {
             if (state->pindexBestKnownBlock == nullptr ||
                 pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                 state->pindexBestKnownBlock = pindex;
             }
             state->hashLastUnknownBlock.SetNull();
         }
     }
 }
 
 void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid,
                                               const BlockHash &hash) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     ProcessBlockAvailability(nodeid);
 
     const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
     if (pindex && pindex->nChainWork > 0) {
         // An actually better block was announced.
         if (state->pindexBestKnownBlock == nullptr ||
             pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
             state->pindexBestKnownBlock = pindex;
         }
     } else {
         // An unknown block was announced; just assume that the latest one is
         // the best one.
         state->hashLastUnknownBlock = hash;
     }
 }
 
 void PeerManagerImpl::FindNextBlocksToDownload(
     NodeId nodeid, unsigned int count,
     std::vector<const CBlockIndex *> &vBlocks, NodeId &nodeStaller) {
     if (count == 0) {
         return;
     }
 
     vBlocks.reserve(vBlocks.size() + count);
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Make sure pindexBestKnownBlock is up to date, we'll need it.
     ProcessBlockAvailability(nodeid);
 
     if (state->pindexBestKnownBlock == nullptr ||
         state->pindexBestKnownBlock->nChainWork <
             m_chainman.ActiveChain().Tip()->nChainWork ||
         state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
         // This peer has nothing interesting.
         return;
     }
 
     if (state->pindexLastCommonBlock == nullptr) {
         // Bootstrap quickly by guessing a parent of our best tip is the forking
         // point. Guessing wrong in either direction is not a problem.
         state->pindexLastCommonBlock =
             m_chainman
                 .ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
                                         m_chainman.ActiveChain().Height())];
     }
 
     // If the peer reorganized, our previous pindexLastCommonBlock may not be an
     // ancestor of its current tip anymore. Go back enough to fix that.
     state->pindexLastCommonBlock = LastCommonAncestor(
         state->pindexLastCommonBlock, state->pindexBestKnownBlock);
     if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
         return;
     }
 
     std::vector<const CBlockIndex *> vToFetch;
     const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
     // Never fetch further than the best block we know the peer has, or more
     // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
     // common with this peer. The +1 is so we can detect stalling, namely if we
     // would be able to download that next block if the window were 1 larger.
     int nWindowEnd =
         state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
     int nMaxHeight =
         std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
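     // Peer that has the first already-in-flight block we encounter, reported
     // as a staller if we cannot fetch anything within the download window.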
     NodeId waitingfor = -1;
     while (pindexWalk->nHeight < nMaxHeight) {
         // Read up to 128 (or more, if more blocks than that are needed)
         // successors of pindexWalk (towards pindexBestKnownBlock) into
         // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
         // expensive as iterating over ~100 CBlockIndex* entries anyway.
         int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                 std::max<int>(count - vBlocks.size(), 128));
         vToFetch.resize(nToFetch);
         pindexWalk = state->pindexBestKnownBlock->GetAncestor(
             pindexWalk->nHeight + nToFetch);
         vToFetch[nToFetch - 1] = pindexWalk;
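         // Fill the rest of vToFetch backwards from pindexWalk by following
         // pprev pointers.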
         for (unsigned int i = nToFetch - 1; i > 0; i--) {
             vToFetch[i - 1] = vToFetch[i]->pprev;
         }
 
         // Iterate over those blocks in vToFetch (in forward direction), adding
         // the ones that are not yet downloaded and not in flight to vBlocks. In
         // the meantime, update pindexLastCommonBlock as long as all ancestors
         // are already downloaded, or if it's already part of our chain (and
         // therefore we don't need it even if it has been pruned).
         for (const CBlockIndex *pindex : vToFetch) {
             if (!pindex->IsValid(BlockValidity::TREE)) {
                 // We consider the chain that this peer is on invalid.
                 return;
             }
             if (pindex->nStatus.hasData() ||
                 m_chainman.ActiveChain().Contains(pindex)) {
                 if (pindex->HaveTxsDownloaded()) {
                     state->pindexLastCommonBlock = pindex;
                 }
             } else if (!IsBlockRequested(pindex->GetBlockHash())) {
                 // The block is not already downloaded, and not yet in flight.
                 if (pindex->nHeight > nWindowEnd) {
                     // We reached the end of the window.
                     if (vBlocks.size() == 0 && waitingfor != nodeid) {
                         // We aren't able to fetch anything, but we would be if
                         // the download window was one larger.
                         nodeStaller = waitingfor;
                     }
                     return;
                 }
                 vBlocks.push_back(pindex);
                 if (vBlocks.size() == count) {
                     return;
                 }
             } else if (waitingfor == -1) {
                 // This is the first already-in-flight block.
                 waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
             }
         }
     }
 }
 
 } // namespace
 
 template <class InvId>
 static bool TooManyAnnouncements(const CNode &node,
                                  const InvRequestTracker<InvId> &requestTracker,
                                  const DataRequestParameters &requestParams) {
     return !node.HasPermission(
                requestParams.bypass_request_limits_permissions) &&
            requestTracker.Count(node.GetId()) >=
                requestParams.max_peer_announcements;
 }
 
 /**
  * Compute the request time for this announcement, current time plus delays for:
  *   - nonpref_peer_delay for announcements from non-preferred connections
  *   - overloaded_peer_delay for announcements from peers which have at least
  *     max_peer_request_in_flight requests in flight (and don't have
  * NetPermissionFlags::Relay).
  */
 template <class InvId>
 static std::chrono::microseconds
 ComputeRequestTime(const CNode &node,
                    const InvRequestTracker<InvId> &requestTracker,
                    const DataRequestParameters &requestParams,
                    std::chrono::microseconds current_time, bool preferred) {
     auto delay = std::chrono::microseconds{0};
 
     if (!preferred) {
         delay += requestParams.nonpref_peer_delay;
     }
 
     if (!node.HasPermission(requestParams.bypass_request_limits_permissions) &&
         requestTracker.CountInFlight(node.GetId()) >=
             requestParams.max_peer_request_in_flight) {
         delay += requestParams.overloaded_peer_delay;
     }
 
     return current_time + delay;
 }
 
 void PeerManagerImpl::PushNodeVersion(const Config &config, CNode &pnode,
                                       const Peer &peer) {
     uint64_t my_services{peer.m_our_services};
     const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
     uint64_t nonce = pnode.GetLocalNonce();
     const int nNodeStartingHeight{m_best_height};
     NodeId nodeid = pnode.GetId();
     CAddress addr = pnode.addr;
     uint64_t extraEntropy = pnode.GetLocalExtraEntropy();
 
     // Only echo the peer's own address back to it when it is routable, not a
     // proxy, and representable in the pre-BIP155 (V1) address format;
     // otherwise send an empty CService.
     CService addr_you =
         addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible()
             ? addr
             : CService();
     uint64_t your_services{addr.nServices};
 
     const bool tx_relay = !m_ignore_incoming_txs && !pnode.IsBlockOnlyConn() &&
                           !pnode.IsFeelerConn();
     m_connman.PushMessage(
         // your_services, addr_you: Together the pre-version-31402 serialization
         //     of CAddress "addrYou" (without nTime)
         // my_services, CService(): Together the pre-version-31402 serialization
         //     of CAddress "addrMe" (without nTime)
         &pnode, CNetMsgMaker(INIT_PROTO_VERSION)
                     .Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services,
                           nTime, your_services, addr_you, my_services,
                           CService(), nonce, userAgent(config),
                           nNodeStartingHeight, tx_relay, extraEntropy));
 
     if (fLogIPs) {
         LogPrint(BCLog::NET,
                  "send version message: version %d, blocks=%d, them=%s, "
                  "txrelay=%d, peer=%d\n",
                  PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToString(),
                  tx_relay, nodeid);
     } else {
         LogPrint(BCLog::NET,
                  "send version message: version %d, blocks=%d, "
                  "txrelay=%d, peer=%d\n",
                  PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
     }
 }
 
 void PeerManagerImpl::AddTxAnnouncement(
     const CNode &node, const TxId &txid,
     std::chrono::microseconds current_time) {
     // For m_txrequest and state
     AssertLockHeld(::cs_main);
 
     if (TooManyAnnouncements(node, m_txrequest, TX_REQUEST_PARAMS)) {
         return;
     }
 
     const bool preferred = isPreferredDownloadPeer(node);
     auto reqtime = ComputeRequestTime(node, m_txrequest, TX_REQUEST_PARAMS,
                                       current_time, preferred);
 
     m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);
 }
 
 void PeerManagerImpl::AddProofAnnouncement(
     const CNode &node, const avalanche::ProofId &proofid,
     std::chrono::microseconds current_time, bool preferred) {
     // For m_proofrequest
     AssertLockHeld(cs_proofrequest);
 
     if (TooManyAnnouncements(node, m_proofrequest, PROOF_REQUEST_PARAMS)) {
         return;
     }
 
     auto reqtime = ComputeRequestTime(
         node, m_proofrequest, PROOF_REQUEST_PARAMS, current_time, preferred);
 
     m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);
 }
 
 void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node,
                                                   int64_t time_in_seconds) {
     LOCK(cs_main);
     CNodeState *state = State(node);
     if (state) {
         state->m_last_block_announcement = time_in_seconds;
     }
 }
 
 void PeerManagerImpl::InitializeNode(const Config &config, CNode &node,
                                      ServiceFlags our_services) {
     NodeId nodeid = node.GetId();
     {
         LOCK(cs_main);
         m_node_states.emplace_hint(m_node_states.end(),
                                    std::piecewise_construct,
                                    std::forward_as_tuple(nodeid),
                                    std::forward_as_tuple(node.IsInboundConn()));
         assert(m_txrequest.Count(nodeid) == 0);
     }
     PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
     {
         LOCK(m_peer_mutex);
         m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
     }
     if (!node.IsInboundConn()) {
         PushNodeVersion(config, node, *peer);
     }
 }
 
 void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler &scheduler) {
     std::set<TxId> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
 
     for (const TxId &txid : unbroadcast_txids) {
         // Sanity check: all unbroadcast txns should exist in the mempool
         if (m_mempool.exists(txid)) {
             RelayTransaction(txid);
         } else {
             m_mempool.RemoveUnbroadcastTx(txid, true);
         }
     }
 
     if (g_avalanche && isAvalancheEnabled(gArgs)) {
         // Get and sanitize the list of proofids to broadcast. The RelayProof
         // call is done in a second loop to avoid locking cs_vNodes while
         // cs_peerManager is locked which would cause a potential deadlock due
         // to reversed lock order.
         auto unbroadcasted_proofids =
             g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
                 auto unbroadcasted_proofids = pm.getUnbroadcastProofs();
 
                 auto it = unbroadcasted_proofids.begin();
                 while (it != unbroadcasted_proofids.end()) {
                     // Sanity check: all unbroadcast proofs should be bound to a
                     // peer in the peermanager
                     if (!pm.isBoundToPeer(*it)) {
                         pm.removeUnbroadcastProof(*it);
                         it = unbroadcasted_proofids.erase(it);
                         continue;
                     }
 
                     ++it;
                 }
 
                 return unbroadcasted_proofids;
             });
 
         // Remaining proofids are the ones to broadcast
         for (const auto &proofid : unbroadcasted_proofids) {
             RelayProof(proofid);
         }
     }
 
     // Schedule next run for 10-15 minutes in the future.
     // We add randomness on every cycle to avoid the possibility of P2P
     // fingerprinting.
     const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
     scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                               reattemptBroadcastInterval);
 }
 
 void PeerManagerImpl::UpdateAvalancheStatistics() const {
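     // Update the availability score of every node using the statistics decay
     // factor.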
     m_connman.ForEachNode([](CNode *pnode) {
         pnode->updateAvailabilityScore(AVALANCHE_STATISTICS_DECAY_FACTOR);
     });
 
     if (!g_avalanche) {
         // Not enabled or not ready yet
         return;
     }
 
     // Generate a peer availability score by computing an exponentially
     // weighted moving average of the average of node availability scores.
     // This ensures the peer score is bound to the lifetime of its proof,
     // which incentivizes stable network activity.
     g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
         pm.updateAvailabilityScores(
             AVALANCHE_STATISTICS_DECAY_FACTOR, [&](NodeId nodeid) -> double {
                 double score{0.0};
                 m_connman.ForNode(nodeid, [&](CNode *pavanode) {
                     score = pavanode->getAvailabilityScore();
                     return true;
                 });
                 return score;
             });
     });
 }
 
 void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
     const auto now = GetTime<std::chrono::seconds>();
     std::vector<NodeId> avanode_ids;
     bool fQuorumEstablished;
     bool fShouldRequestMoreNodes;
 
     if (!g_avalanche) {
         // Not enabled or not ready yet, retry later
         goto scheduleLater;
     }
 
     g_avalanche->sendDelayedAvahello();
 
     fQuorumEstablished = g_avalanche->isQuorumEstablished();
     fShouldRequestMoreNodes =
         g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
             return pm.shouldRequestMoreNodes();
         });
 
     m_connman.ForEachNode([&](CNode *pnode) {
         // Build a list of the avalanche peers nodeids
         if (pnode->m_avalanche_enabled &&
             (!fQuorumEstablished || !pnode->IsInboundConn())) {
             avanode_ids.push_back(pnode->GetId());
         }
 
         PeerRef peer = GetPeerRef(pnode->GetId());
         if (peer == nullptr) {
             return;
         }
         // If a proof radix tree timed out, cleanup
         if (peer->m_proof_relay &&
             now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
                    AVALANCHE_AVAPROOFS_TIMEOUT)) {
             peer->m_proof_relay->sharedProofs = {};
         }
     });
 
     if (avanode_ids.empty()) {
         // No node is available for messaging, retry later
         goto scheduleLater;
     }
 
     Shuffle(avanode_ids.begin(), avanode_ids.end(), FastRandomContext());
 
     // Request avalanche addresses from our peers
     for (NodeId avanodeId : avanode_ids) {
         m_connman.ForNode(avanodeId, [&](CNode *pavanode) {
             m_connman.PushMessage(pavanode,
                                   CNetMsgMaker(pavanode->GetCommonVersion())
                                       .Make(NetMsgType::GETAVAADDR));
             PeerRef peer = GetPeerRef(avanodeId);
             WITH_LOCK(peer->m_addr_token_bucket_mutex,
                       peer->m_addr_token_bucket += GetMaxAddrToSend());
             return true;
         });
 
         // If we have no reason to believe that we need more nodes, only request
         // addresses from one of our peers.
         if (fQuorumEstablished && !fShouldRequestMoreNodes) {
             break;
         }
     }
 
     if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
         // Don't request proofs while in IBD. We're likely to orphan them
         // because we don't have the UTXOs.
         goto scheduleLater;
     }
 
     // If we have never received an avaproofs message yet, be kind and only
     // request from a subset of our peers, as we expect a ton of avaproofs
     // messages in the process.
     if (g_avalanche->getAvaproofsNodeCounter() == 0) {
         avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
     }
 
     for (NodeId nodeid : avanode_ids) {
         // Send a getavaproofs to all of our peers
         m_connman.ForNode(nodeid, [&](CNode *pavanode) {
             PeerRef peer = GetPeerRef(nodeid);
             if (peer->m_proof_relay) {
                 m_connman.PushMessage(pavanode,
                                       CNetMsgMaker(pavanode->GetCommonVersion())
                                           .Make(NetMsgType::GETAVAPROOFS));
 
                 peer->m_proof_relay->compactproofs_requested = true;
             }
             return true;
         });
     }
 
 scheduleLater:
     // Schedule next run for 2-5 minutes in the future.
     // We add randomness on every cycle to avoid the possibility of P2P
     // fingerprinting.
     const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
     scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
                               avalanchePeriodicNetworkingInterval);
 }
 
 void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node) {
     NodeId nodeid = node.GetId();
     int misbehavior{0};
     {
         LOCK(cs_main);
         {
             // We remove the PeerRef from m_peer_map here, but we don't always
             // destruct the Peer. Sometimes another thread is still holding a
             // PeerRef, so the refcount is >= 1. Be careful not to do any
             // processing here that assumes Peer won't be changed before it's
             // destructed.
             PeerRef peer = RemovePeer(nodeid);
             assert(peer != nullptr);
             misbehavior = WITH_LOCK(peer->m_misbehavior_mutex,
                                     return peer->m_misbehavior_score);
             LOCK(m_peer_mutex);
             m_peer_map.erase(nodeid);
         }
         CNodeState *state = State(nodeid);
         assert(state != nullptr);
 
         if (state->fSyncStarted) {
             nSyncStarted--;
         }
 
         for (const QueuedBlock &entry : state->vBlocksInFlight) {
             mapBlocksInFlight.erase(entry.pindex->GetBlockHash());
         }
         WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid));
         m_txrequest.DisconnectedPeer(nodeid);
         m_num_preferred_download_peers -= state->fPreferredDownload;
         m_peers_downloading_from -= (state->nBlocksInFlight != 0);
         assert(m_peers_downloading_from >= 0);
         m_outbound_peers_with_protect_from_disconnect -=
             state->m_chain_sync.m_protect;
         assert(m_outbound_peers_with_protect_from_disconnect >= 0);
 
         m_node_states.erase(nodeid);
 
         if (m_node_states.empty()) {
             // Do a consistency check after the last peer is removed.
             assert(mapBlocksInFlight.empty());
             assert(m_num_preferred_download_peers == 0);
             assert(m_peers_downloading_from == 0);
             assert(m_outbound_peers_with_protect_from_disconnect == 0);
             assert(m_txrequest.Size() == 0);
             assert(m_orphanage.Size() == 0);
         }
     }
 
     if (node.fSuccessfullyConnected && misbehavior == 0 &&
         !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
         // Only change visible addrman state for full outbound peers. We don't
         // call Connected() for feeler connections since they don't have
         // fSuccessfullyConnected set.
         m_addrman.Connected(node.addr);
     }
 
     WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
 
     LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
 }
 
 PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const {
     LOCK(m_peer_mutex);
     auto it = m_peer_map.find(id);
     return it != m_peer_map.end() ? it->second : nullptr;
 }
 
 PeerRef PeerManagerImpl::RemovePeer(NodeId id) {
     PeerRef ret;
     LOCK(m_peer_mutex);
     auto it = m_peer_map.find(id);
     if (it != m_peer_map.end()) {
         ret = std::move(it->second);
         m_peer_map.erase(it);
     }
     return ret;
 }
 
 bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid,
                                         CNodeStateStats &stats) const {
     {
         LOCK(cs_main);
         const CNodeState *state = State(nodeid);
         if (state == nullptr) {
             return false;
         }
         stats.nSyncHeight = state->pindexBestKnownBlock
                                 ? state->pindexBestKnownBlock->nHeight
                                 : -1;
         stats.nCommonHeight = state->pindexLastCommonBlock
                                   ? state->pindexLastCommonBlock->nHeight
                                   : -1;
         for (const QueuedBlock &queue : state->vBlocksInFlight) {
             if (queue.pindex) {
                 stats.vHeightInFlight.push_back(queue.pindex->nHeight);
             }
         }
     }
 
     PeerRef peer = GetPeerRef(nodeid);
     if (peer == nullptr) {
         return false;
     }
     stats.their_services = peer->m_their_services;
     stats.m_starting_height = peer->m_starting_height;
     // It is common for nodes with good ping times to suddenly become lagged,
     // due to a new block arriving or other large transfer.
     // Merely reporting pingtime might fool the caller into thinking the node
     // was still responsive, since pingtime does not update until the ping is
     // complete, which might take a while. So, if a ping is taking an unusually
     // long time in flight, the caller can immediately detect that this is
     // happening.
     auto ping_wait{0us};
     if ((0 != peer->m_ping_nonce_sent) &&
         (0 != peer->m_ping_start.load().count())) {
         ping_wait =
             GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
     }
 
     if (auto tx_relay = peer->GetTxRelay()) {
         stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex,
                                       return tx_relay->m_relay_txs);
         stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
     } else {
         stats.m_relay_txs = false;
         stats.m_fee_filter_received = Amount::zero();
     }
 
     stats.m_ping_wait = ping_wait;
     stats.m_addr_processed = peer->m_addr_processed.load();
     stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
     stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
 
     return true;
 }
 
 void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef &tx) {
     size_t max_extra_txn = gArgs.GetIntArg(
         "-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
     if (max_extra_txn <= 0) {
         return;
     }
 
     // Lazily allocate the ring buffer on first use.
     if (!vExtraTxnForCompact.size()) {
         vExtraTxnForCompact.resize(max_extra_txn);
     }
 
     vExtraTxnForCompact[vExtraTxnForCompactIt] =
         std::make_pair(tx->GetHash(), tx);
     vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
 }
 
 void PeerManagerImpl::Misbehaving(const NodeId pnode, const int howmuch,
                                   const std::string &message) {
     assert(howmuch > 0);
 
     PeerRef peer = GetPeerRef(pnode);
     if (peer == nullptr) {
         return;
     }
 
     LOCK(peer->m_misbehavior_mutex);
     const int score_before{peer->m_misbehavior_score};
     peer->m_misbehavior_score += howmuch;
     const int score_now{peer->m_misbehavior_score};
 
     const std::string message_prefixed =
         message.empty() ? "" : (": " + message);
     std::string warning;
 
     if (score_now >= DISCOURAGEMENT_THRESHOLD &&
         score_before < DISCOURAGEMENT_THRESHOLD) {
         warning = " DISCOURAGE THRESHOLD EXCEEDED";
         peer->m_should_discourage = true;
     }
 
     LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n", pnode,
              score_before, score_now, warning, message_prefixed);
 }
 
 bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid,
                                               const BlockValidationState &state,
                                               bool via_compact_block,
                                               const std::string &message) {
     switch (state.GetResult()) {
         case BlockValidationResult::BLOCK_RESULT_UNSET:
             break;
         // The node is providing invalid data:
         case BlockValidationResult::BLOCK_CONSENSUS:
         case BlockValidationResult::BLOCK_MUTATED:
             if (!via_compact_block) {
                 Misbehaving(nodeid, 100, message);
                 return true;
             }
             break;
         case BlockValidationResult::BLOCK_CACHED_INVALID: {
             LOCK(cs_main);
             CNodeState *node_state = State(nodeid);
             if (node_state == nullptr) {
                 break;
             }
 
             // Ban outbound (but not inbound) peers if on an invalid chain.
             // Exempt HB compact block peers. Manual connections are always
             // protected from discouragement.
             if (!via_compact_block && !node_state->m_is_inbound) {
                 Misbehaving(nodeid, 100, message);
                 return true;
             }
             break;
         }
         case BlockValidationResult::BLOCK_INVALID_HEADER:
         case BlockValidationResult::BLOCK_CHECKPOINT:
         case BlockValidationResult::BLOCK_INVALID_PREV:
             Misbehaving(nodeid, 100, message);
             return true;
         case BlockValidationResult::BLOCK_FINALIZATION:
            // TODO: Using the state object to report this is probably not the
            // best idea. This is effectively unreachable, unless there is a
            // bug somewhere.
             Misbehaving(nodeid, 20, message);
             return true;
         // Conflicting (but not necessarily invalid) data or different policy:
         case BlockValidationResult::BLOCK_MISSING_PREV:
             // TODO: Handle this much more gracefully (10 DoS points is super
             // arbitrary)
             Misbehaving(nodeid, 10, message);
             return true;
         case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE:
         case BlockValidationResult::BLOCK_TIME_FUTURE:
             break;
     }
     if (message != "") {
         LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
     }
     return false;
 }
 
 bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid,
                                            const TxValidationState &state,
                                            const std::string &message) {
     switch (state.GetResult()) {
         case TxValidationResult::TX_RESULT_UNSET:
             break;
         // The node is providing invalid data:
         case TxValidationResult::TX_CONSENSUS:
             Misbehaving(nodeid, 100, message);
             return true;
         // Conflicting (but not necessarily invalid) data or different policy:
         case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
         case TxValidationResult::TX_INPUTS_NOT_STANDARD:
         case TxValidationResult::TX_NOT_STANDARD:
         case TxValidationResult::TX_MISSING_INPUTS:
         case TxValidationResult::TX_PREMATURE_SPEND:
         case TxValidationResult::TX_CONFLICT:
         case TxValidationResult::TX_MEMPOOL_POLICY:
         case TxValidationResult::TX_NO_MEMPOOL:
             break;
     }
     if (message != "") {
         LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
     }
     return false;
 }
 
 bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex *pindex) {
     AssertLockHeld(cs_main);
     if (m_chainman.ActiveChain().Contains(pindex)) {
         return true;
     }
     return pindex->IsValid(BlockValidity::SCRIPTS) &&
            (m_chainman.m_best_header != nullptr) &&
            (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() <
             STALE_RELAY_AGE_LIMIT) &&
            (GetBlockProofEquivalentTime(
                 *m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,
                 m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
 }
 
 std::optional<std::string>
 PeerManagerImpl::FetchBlock(const Config &config, NodeId peer_id,
                             const CBlockIndex &block_index) {
     if (fImporting) {
         return "Importing...";
     }
     if (fReindex) {
         return "Reindexing...";
     }
 
     LOCK(cs_main);
     // Ensure this peer exists and hasn't been disconnected
     CNodeState *state = State(peer_id);
     if (state == nullptr) {
         return "Peer does not exist";
     }
     // Mark block as in-flight unless it already is (for this peer).
     // If a block was already in-flight for a different peer, its BLOCKTXN
     // response will be dropped.
     if (!BlockRequested(config, peer_id, block_index)) {
         return "Already requested from this peer";
     }
 
     // Construct message to request the block
     const BlockHash &hash{block_index.GetBlockHash()};
     const std::vector<CInv> invs{CInv(MSG_BLOCK, hash)};
 
     // Send block request message to the peer
     if (!m_connman.ForNode(peer_id, [this, &invs](CNode *node) {
             const CNetMsgMaker msgMaker(node->GetCommonVersion());
             this->m_connman.PushMessage(
                 node, msgMaker.Make(NetMsgType::GETDATA, invs));
             return true;
         })) {
         return "Node not fully connected";
     }
 
     LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", hash.ToString(),
              peer_id);
     return std::nullopt;
 }
 
 std::unique_ptr<PeerManager> PeerManager::make(CConnman &connman,
                                                AddrMan &addrman, BanMan *banman,
                                                ChainstateManager &chainman,
                                                CTxMemPool &pool,
                                                bool ignore_incoming_txs) {
     return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,
                                              pool, ignore_incoming_txs);
 }
 
 PeerManagerImpl::PeerManagerImpl(CConnman &connman, AddrMan &addrman,
                                  BanMan *banman, ChainstateManager &chainman,
                                  CTxMemPool &pool, bool ignore_incoming_txs)
     : m_chainparams(chainman.GetParams()), m_connman(connman),
       m_addrman(addrman), m_banman(banman), m_chainman(chainman),
       m_mempool(pool), m_ignore_incoming_txs(ignore_incoming_txs) {}
 
 void PeerManagerImpl::StartScheduledTasks(CScheduler &scheduler) {
     // Stale tip checking and peer eviction are on two different timers, but we
     // don't want them to get out of sync due to drift in the scheduler, so we
     // combine them in one function and schedule at the quicker (peer-eviction)
     // timer.
     static_assert(
         EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL,
         "peer eviction timer should be less than stale tip check timer");
     scheduler.scheduleEvery(
         [this]() {
             this->CheckForStaleTipAndEvictPeers();
             return true;
         },
         std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
 
     // schedule next run for 10-15 minutes in the future
     const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
     scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                               reattemptBroadcastInterval);
 
     // Update the avalanche statistics on a schedule
     scheduler.scheduleEvery(
         [this]() {
             UpdateAvalancheStatistics();
             return true;
         },
         AVALANCHE_STATISTICS_REFRESH_PERIOD);
 
     // schedule next run for 2-5 minutes in the future
     const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
     scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
                               avalanchePeriodicNetworkingInterval);
 }
 
 /**
  * Evict orphan txn pool entries based on a newly connected
  * block, remember the recently confirmed transactions, and delete tracked
  * announcements for them. Also save the time of the last tip update.
  */
 void PeerManagerImpl::BlockConnected(
     const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) {
     m_orphanage.EraseForBlock(*pblock);
     m_last_tip_update = GetTime<std::chrono::seconds>();
 
     {
         LOCK(m_recent_confirmed_transactions_mutex);
         for (const CTransactionRef &ptx : pblock->vtx) {
             m_recent_confirmed_transactions.insert(ptx->GetId());
         }
     }
     {
         LOCK(cs_main);
         for (const auto &ptx : pblock->vtx) {
             m_txrequest.ForgetInvId(ptx->GetId());
         }
     }
 }
 
 void PeerManagerImpl::BlockDisconnected(
     const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
     // To avoid relay problems with transactions that were previously
     // confirmed, clear our filter of recently confirmed transactions whenever
     // there's a reorg.
     // This means that in a 1-block reorg (where 1 block is disconnected and
     // then another block reconnected), our filter will drop to having only one
     // block's worth of transactions in it, but that should be fine, since
     // presumably the most common case of relaying a confirmed transaction
     // should be just after a new block containing it is found.
     LOCK(m_recent_confirmed_transactions_mutex);
     m_recent_confirmed_transactions.reset();
 }
 
 /**
  * Maintain state about the best-seen block and fast-announce a compact block
  * to compatible peers.
  */
 void PeerManagerImpl::NewPoWValidBlock(
     const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
         std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
     const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
 
     LOCK(cs_main);
 
     if (pindex->nHeight <= m_highest_fast_announce) {
         return;
     }
     m_highest_fast_announce = pindex->nHeight;
 
     BlockHash hashBlock(pblock->GetHash());
     const std::shared_future<CSerializedNetMsg> lazy_ser{
         std::async(std::launch::deferred, [&] {
             return msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock);
         })};
 
     {
         LOCK(m_most_recent_block_mutex);
         m_most_recent_block_hash = hashBlock;
         m_most_recent_block = pblock;
         m_most_recent_compact_block = pcmpctblock;
     }
 
     m_connman.ForEachNode(
         [this, pindex, &lazy_ser, &hashBlock](CNode *pnode)
             EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
                 AssertLockHeld(::cs_main);
 
                 if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION ||
                     pnode->fDisconnect) {
                     return;
                 }
                 ProcessBlockAvailability(pnode->GetId());
                 CNodeState &state = *State(pnode->GetId());
                // If the peer already has the previous block, or we have
                // announced it to them, but we don't think they have this one,
                // go ahead and announce it.
                 if (state.m_requested_hb_cmpctblocks &&
                     !PeerHasHeader(&state, pindex) &&
                     PeerHasHeader(&state, pindex->pprev)) {
                     LogPrint(BCLog::NET,
                              "%s sending header-and-ids %s to peer=%d\n",
                              "PeerManager::NewPoWValidBlock",
                              hashBlock.ToString(), pnode->GetId());
 
                     const CSerializedNetMsg &ser_cmpctblock{lazy_ser.get()};
                     m_connman.PushMessage(
                         pnode, CSerializedNetMsg{ser_cmpctblock.data,
                                                  ser_cmpctblock.m_type});
                     state.pindexBestHeaderSent = pindex;
                 }
             });
 }
 
 /**
  * Update our best height and announce any block hashes which weren't previously
  * in m_chainman.ActiveChain() to our peers.
  */
 void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew,
                                       const CBlockIndex *pindexFork,
                                       bool fInitialDownload) {
     SetBestHeight(pindexNew->nHeight);
     SetServiceFlagsIBDCache(!fInitialDownload);
 
     // Don't relay inventory during initial block download.
     if (fInitialDownload) {
         return;
     }
 
     // Find the hashes of all blocks that weren't previously in the best chain.
     std::vector<BlockHash> vHashes;
     const CBlockIndex *pindexToAnnounce = pindexNew;
     while (pindexToAnnounce != pindexFork) {
         vHashes.push_back(pindexToAnnounce->GetBlockHash());
         pindexToAnnounce = pindexToAnnounce->pprev;
         if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
             // Limit announcements in case of a huge reorganization. Rely on the
             // peer's synchronization mechanism in that case.
             break;
         }
     }
 
     {
         LOCK(m_peer_mutex);
         for (auto &it : m_peer_map) {
             Peer &peer = *it.second;
             LOCK(peer.m_block_inv_mutex);
             for (const BlockHash &hash : reverse_iterate(vHashes)) {
                 peer.m_blocks_for_headers_relay.push_back(hash);
             }
         }
     }
 
     m_connman.WakeMessageHandler();
 }
 
 /**
 * Handle invalid block rejection and consequent peer discouragement; maintain
 * which peers announce compact blocks.
  */
 void PeerManagerImpl::BlockChecked(const CBlock &block,
                                    const BlockValidationState &state) {
     LOCK(cs_main);
 
     const BlockHash hash = block.GetHash();
     std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
         mapBlockSource.find(hash);
 
     // If the block failed validation, we know where it came from and we're
     // still connected to that peer, maybe punish.
     if (state.IsInvalid() && it != mapBlockSource.end() &&
         State(it->second.first)) {
         MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
                                 /*via_compact_block=*/!it->second.second);
     }
     // Check that:
     // 1. The block is valid
     // 2. We're not in initial block download
     // 3. This is currently the best block we're aware of. We haven't updated
     //    the tip yet so we have no way to check this directly here. Instead we
     //    just check that there are currently no other blocks in flight.
     else if (state.IsValid() &&
              !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
              mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
         if (it != mapBlockSource.end()) {
             MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
         }
     }
 
     if (it != mapBlockSource.end()) {
         mapBlockSource.erase(it);
     }
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // Messages
 //
 
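/**
 * Check whether we already know about a transaction: as an orphan, as recently
 * confirmed, as recently rejected (the reject filter is reset whenever the
 * chain tip changes), or as a mempool entry.
 */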
 bool PeerManagerImpl::AlreadyHaveTx(const TxId &txid) {
     if (m_chainman.ActiveChain().Tip()->GetBlockHash() !=
         hashRecentRejectsChainTip) {
        // If the chain tip has changed, previously rejected transactions
        // might now be valid, e.g. due to an nLockTime'd tx becoming
        // valid, or a double-spend. Reset the rejects filter and give
        // those txs a second chance.
         hashRecentRejectsChainTip =
             m_chainman.ActiveChain().Tip()->GetBlockHash();
         m_recent_rejects.reset();
     }
 
     if (m_orphanage.HaveTx(txid)) {
         return true;
     }
 
     {
         LOCK(m_recent_confirmed_transactions_mutex);
         if (m_recent_confirmed_transactions.contains(txid)) {
             return true;
         }
     }
 
     return m_recent_rejects.contains(txid) || m_mempool.exists(txid);
 }
 
 bool PeerManagerImpl::AlreadyHaveBlock(const BlockHash &block_hash) {
     return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
 }
 
 bool PeerManagerImpl::AlreadyHaveProof(const avalanche::ProofId &proofid) {
     assert(g_avalanche);
 
     auto localProof = g_avalanche->getLocalProof();
     if (localProof && localProof->getId() == proofid) {
         return true;
     }
 
     return g_avalanche->withPeerManager([&proofid](avalanche::PeerManager &pm) {
         return pm.exists(proofid) || pm.isInvalid(proofid);
     });
 }
 
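/** Queue a ping to every connected peer by setting its m_ping_queued flag. */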
 void PeerManagerImpl::SendPings() {
     LOCK(m_peer_mutex);
     for (auto &it : m_peer_map) {
         it.second->m_ping_queued = true;
     }
 }
 
 void PeerManagerImpl::RelayTransaction(const TxId &txid) {
     LOCK(m_peer_mutex);
     for (auto &it : m_peer_map) {
         Peer &peer = *it.second;
         auto tx_relay = peer.GetTxRelay();
         if (!tx_relay) {
             continue;
         }
         LOCK(tx_relay->m_tx_inventory_mutex);
         if (!tx_relay->m_tx_inventory_known_filter.contains(txid)) {
             tx_relay->m_tx_inventory_to_send.insert(txid);
         }
     }
 }
 
 void PeerManagerImpl::RelayProof(const avalanche::ProofId &proofid) {
     LOCK(m_peer_mutex);
     for (auto &it : m_peer_map) {
         Peer &peer = *it.second;
 
         if (!peer.m_proof_relay) {
             continue;
         }
         LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
         if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
                 proofid)) {
             peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);
         }
     }
 }
 
 void PeerManagerImpl::RelayAddress(NodeId originator, const CAddress &addr,
                                    bool fReachable) {
     // We choose the same nodes within a given 24h window (if the list of
     // connected nodes does not change) and we don't relay to nodes that already
     // know an address. So within 24h we will likely relay a given address once.
     // This is to prevent a peer from unjustly giving their address better
     // propagation by sending it to us repeatedly.
 
     if (!fReachable && !addr.IsRelayable()) {
         return;
     }
 
     // Relay to a limited number of other nodes
     // Use deterministic randomness to send to the same nodes for 24 hours
     // at a time so the m_addr_knowns of the chosen nodes prevent repeats
     const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
     const auto current_time{GetTime<std::chrono::seconds>()};
     // Adding address hash makes exact rotation time different per address,
     // while preserving periodicity.
     const uint64_t time_addr{
         (static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) /
         count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
 
     const CSipHasher hasher{
         m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
             .Write(hash_addr)
             .Write(time_addr)};
     FastRandomContext insecure_rand;
 
     // Relay reachable addresses to 2 peers. Unreachable addresses are relayed
     // randomly to 1 or 2 peers.
     unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
     std::array<std::pair<uint64_t, Peer *>, 2> best{
         {{0, nullptr}, {0, nullptr}}};
     assert(nRelayNodes <= best.size());
 
     LOCK(m_peer_mutex);
 
     for (auto &[id, peer] : m_peer_map) {
         if (peer->m_addr_relay_enabled && id != originator &&
             IsAddrCompatible(*peer, addr)) {
             uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
             for (unsigned int i = 0; i < nRelayNodes; i++) {
                 if (hashKey > best[i].first) {
                     std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
                               best.begin() + i + 1);
                     best[i] = std::make_pair(hashKey, peer.get());
                     break;
                 }
             }
         }
     };
 
     for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
         PushAddress(*best[i].second, addr, insecure_rand);
     }
 }
 
 void PeerManagerImpl::ProcessGetBlockData(const Config &config, CNode &pfrom,
                                           Peer &peer, const CInv &inv) {
     const BlockHash hash(inv.hash);
 
     std::shared_ptr<const CBlock> a_recent_block;
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
     {
         LOCK(m_most_recent_block_mutex);
         a_recent_block = m_most_recent_block;
         a_recent_compact_block = m_most_recent_compact_block;
     }
 
     bool need_activate_chain = false;
     {
         LOCK(cs_main);
         const CBlockIndex *pindex =
             m_chainman.m_blockman.LookupBlockIndex(hash);
         if (pindex) {
             if (pindex->HaveTxsDownloaded() &&
                 !pindex->IsValid(BlockValidity::SCRIPTS) &&
                 pindex->IsValid(BlockValidity::TREE)) {
                 // If we have the block and all of its parents, but have not yet
                 // validated it, we might be in the middle of connecting it (ie
                 // in the unlock of cs_main before ActivateBestChain but after
                 // AcceptBlock). In this case, we need to run ActivateBestChain
                 // prior to checking the relay conditions below.
                 need_activate_chain = true;
             }
         }
     } // release cs_main before calling ActivateBestChain
     if (need_activate_chain) {
         BlockValidationState state;
         if (!m_chainman.ActiveChainstate().ActivateBestChain(config, state,
                                                              a_recent_block)) {
             LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                      state.ToString());
         }
     }
 
     LOCK(cs_main);
     const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
     if (!pindex) {
         return;
     }
     if (!BlockRequestAllowed(pindex)) {
         LogPrint(BCLog::NET,
                  "%s: ignoring request from peer=%i for old "
                  "block that isn't in the main chain\n",
                  __func__, pfrom.GetId());
         return;
     }
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
     // Disconnect node in case we have reached the outbound limit for serving
     // historical blocks.
     if (m_connman.OutboundTargetReached(true) &&
         (((m_chainman.m_best_header != nullptr) &&
           (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() >
            HISTORICAL_BLOCK_AGE)) ||
          inv.IsMsgFilteredBlk()) &&
         // nodes with the download permission may exceed target
         !pfrom.HasPermission(NetPermissionFlags::Download)) {
         LogPrint(BCLog::NET,
                  "historical block serving limit reached, disconnect peer=%d\n",
                  pfrom.GetId());
         pfrom.fDisconnect = true;
         return;
     }
     // Avoid leaking prune-height by never sending blocks below the
     // NODE_NETWORK_LIMITED threshold.
    // Allow a two-block buffer to account for possible races
     if (!pfrom.HasPermission(NetPermissionFlags::NoBan) &&
         ((((peer.m_our_services & NODE_NETWORK_LIMITED) ==
            NODE_NETWORK_LIMITED) &&
           ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) &&
           (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight >
            (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
         LogPrint(BCLog::NET,
                  "Ignore block request below NODE_NETWORK_LIMITED "
                  "threshold, disconnect peer=%d\n",
                  pfrom.GetId());
 
         // disconnect node and prevent it from stalling (would otherwise wait
         // for the missing block)
         pfrom.fDisconnect = true;
         return;
     }
     // Pruned nodes may have deleted the block, so check whether it's available
     // before trying to send.
     if (!pindex->nStatus.hasData()) {
         return;
     }
     std::shared_ptr<const CBlock> pblock;
     if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
         pblock = a_recent_block;
     } else {
         // Send block from disk
         std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
         if (!ReadBlockFromDisk(*pblockRead, pindex,
                                m_chainparams.GetConsensus())) {
             assert(!"cannot load block from disk");
         }
         pblock = pblockRead;
     }
     if (inv.IsMsgBlk()) {
         m_connman.PushMessage(&pfrom,
                               msgMaker.Make(NetMsgType::BLOCK, *pblock));
     } else if (inv.IsMsgFilteredBlk()) {
         bool sendMerkleBlock = false;
         CMerkleBlock merkleBlock;
         if (auto tx_relay = peer.GetTxRelay()) {
             LOCK(tx_relay->m_bloom_filter_mutex);
             if (tx_relay->m_bloom_filter) {
                 sendMerkleBlock = true;
                 merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
             }
         }
         if (sendMerkleBlock) {
             m_connman.PushMessage(
                 &pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
             // CMerkleBlock just contains hashes, so also push any
             // transactions in the block the client did not see. This avoids
             // hurting performance by pointlessly requiring a round-trip.
            // Note that there is currently no way for a node to request any
            // single transaction we didn't send here - they must either
            // disconnect and retry or request the full block. Thus, the
            // protocol spec allows us to provide duplicate txn here, however
            // we MUST always provide at least what the remote peer needs.
             typedef std::pair<size_t, uint256> PairType;
             for (PairType &pair : merkleBlock.vMatchedTxn) {
                 m_connman.PushMessage(
                     &pfrom,
                     msgMaker.Make(NetMsgType::TX, *pblock->vtx[pair.first]));
             }
         }
         // else
         // no response
     } else if (inv.IsMsgCmpctBlk()) {
         // If a peer is asking for old blocks, we're almost guaranteed they
         // won't have a useful mempool to match against a compact block, and
         // we don't feel like constructing the object for them, so instead
         // we respond with the full, non-compact block.
         int nSendFlags = 0;
         if (CanDirectFetch() &&
             pindex->nHeight >=
                 m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) {
             if (a_recent_compact_block &&
                 a_recent_compact_block->header.GetHash() ==
                     pindex->GetBlockHash()) {
                 m_connman.PushMessage(&pfrom,
                                       msgMaker.Make(NetMsgType::CMPCTBLOCK,
                                                     *a_recent_compact_block));
             } else {
                 CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
                 m_connman.PushMessage(
                     &pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                           cmpctblock));
             }
         } else {
             m_connman.PushMessage(
                 &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
         }
     }
 
     {
         LOCK(peer.m_block_inv_mutex);
         // Trigger the peer node to send a getblocks request for the next
         // batch of inventory.
         if (hash == peer.m_continuation_block) {
             // Send immediately. This must send even if redundant, and
             // we want it right after the last block so they don't wait for
             // other stuff first.
             std::vector<CInv> vInv;
             vInv.push_back(CInv(
                 MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash()));
             m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
             peer.m_continuation_block = BlockHash();
         }
     }
 }
 
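//! Determine whether or not a peer can request a transaction, and return it
//! (or nullptr if not found or not allowed).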
 CTransactionRef
 PeerManagerImpl::FindTxForGetData(const CNode &peer, const TxId &txid,
                                   const std::chrono::seconds mempool_req,
                                   const std::chrono::seconds now) {
     auto txinfo = m_mempool.info(txid);
     if (txinfo.tx) {
         // If a TX could have been INVed in reply to a MEMPOOL request,
         // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
         // unconditionally.
         if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
             txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
             return std::move(txinfo.tx);
         }
     }
 
     {
         LOCK(cs_main);
 
         // Otherwise, the transaction must have been announced recently.
         if (State(peer.GetId())->m_recently_announced_invs.contains(txid)) {
             // If it was, it can be relayed from either the mempool...
             if (txinfo.tx) {
                 return std::move(txinfo.tx);
             }
             // ... or the relay pool.
             auto mi = mapRelay.find(txid);
             if (mi != mapRelay.end()) {
                 return mi->second;
             }
         }
     }
 
     return {};
 }
 
 //! Determine whether or not a peer can request a proof, and return it (or
 //! nullptr if not found or not allowed).
 avalanche::ProofRef
 PeerManagerImpl::FindProofForGetData(const CNode &peer,
                                      const avalanche::ProofId &proofid,
                                      const std::chrono::seconds now) {
     avalanche::ProofRef proof;
 
     bool send_unconditionally =
         g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
             return pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
                 proof = peer.proof;
 
                 // If we know that proof for long enough, allow for requesting
                 // it.
                 return peer.registration_time <=
                        now - UNCONDITIONAL_RELAY_DELAY;
             });
         });
 
     if (!proof) {
        // Always send our local proof if it gets requested, assuming it's
        // valid. This will make it easier to bind with peers upon startup,
        // where the status of our proof is unknown while waiting for a block.
        // Note that it still needs to have been announced first (presumably
        // via an avahello message).
         proof = g_avalanche->getLocalProof();
     }
 
     // We don't have this proof
     if (!proof) {
         return avalanche::ProofRef();
     }
 
     if (send_unconditionally) {
         return proof;
     }
 
     // Otherwise, the proofs must have been announced recently.
     LOCK(cs_main);
     if (State(peer.GetId())->m_recently_announced_proofs.contains(proofid)) {
         return proof;
     }
 
     return avalanche::ProofRef();
 }
 
 void PeerManagerImpl::ProcessGetData(
     const Config &config, CNode &pfrom, Peer &peer,
     const std::atomic<bool> &interruptMsgProc) {
     AssertLockNotHeld(cs_main);
 
     auto tx_relay = peer.GetTxRelay();
 
     std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
     std::vector<CInv> vNotFound;
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
 
     const auto now{GetTime<std::chrono::seconds>()};
     // Get last mempool request time
     const auto mempool_req = tx_relay != nullptr
                                  ? tx_relay->m_last_mempool_req.load()
                                  : std::chrono::seconds::min();
 
     // Process as many TX or AVA_PROOF items from the front of the getdata
     // queue as possible, since they're common and it's efficient to batch
     // process them.
     while (it != peer.m_getdata_requests.end()) {
         if (interruptMsgProc) {
             return;
         }
         // The send buffer provides backpressure. If there's no space in
         // the buffer, pause processing until the next call.
         if (pfrom.fPauseSend) {
             break;
         }
 
         const CInv &inv = *it;
 
         if (it->IsMsgProof()) {
             const avalanche::ProofId proofid(inv.hash);
             auto proof = FindProofForGetData(pfrom, proofid, now);
             if (proof) {
                 m_connman.PushMessage(
                     &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
                 g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
                     pm.removeUnbroadcastProof(proofid);
                 });
             } else {
                 vNotFound.push_back(inv);
             }
 
             ++it;
             continue;
         }
 
         if (it->IsMsgTx()) {
            if (tx_relay == nullptr) {
                // Ignore GETDATA requests for transactions from
                // block-relay-only peers and peers that asked us not to
                // announce transactions. Skip this entry so the loop can
                // make progress.
                ++it;
                continue;
            }
 
             const TxId txid(inv.hash);
             CTransactionRef tx =
                 FindTxForGetData(pfrom, txid, mempool_req, now);
             if (tx) {
                 int nSendFlags = 0;
                 m_connman.PushMessage(
                     &pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
                 m_mempool.RemoveUnbroadcastTx(txid);
                 // As we're going to send tx, make sure its unconfirmed parents
                 // are made requestable.
                 std::vector<TxId> parent_ids_to_add;
                 {
                     LOCK(m_mempool.cs);
                     auto txiter = m_mempool.GetIter(tx->GetId());
                     if (txiter) {
                         const CTxMemPoolEntry::Parents &parents =
                             (*txiter)->GetMemPoolParentsConst();
                         parent_ids_to_add.reserve(parents.size());
                         for (const CTxMemPoolEntry &parent : parents) {
                             if (parent.GetTime() >
                                 now - UNCONDITIONAL_RELAY_DELAY) {
                                 parent_ids_to_add.push_back(
                                     parent.GetTx().GetId());
                             }
                         }
                     }
                 }
                 for (const TxId &parent_txid : parent_ids_to_add) {
                     // Relaying a transaction with a recent but unconfirmed
                     // parent.
                     if (WITH_LOCK(tx_relay->m_tx_inventory_mutex,
                                   return !tx_relay->m_tx_inventory_known_filter
                                               .contains(parent_txid))) {
                         LOCK(cs_main);
                         State(pfrom.GetId())
                             ->m_recently_announced_invs.insert(parent_txid);
                     }
                 }
             } else {
                 vNotFound.push_back(inv);
             }
 
             ++it;
             continue;
         }
 
         // It's neither a proof nor a transaction
         break;
     }
 
     // Only process one BLOCK item per call, since they're uncommon and can be
     // expensive to process.
     if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
         const CInv &inv = *it++;
         if (inv.IsGenBlkMsg()) {
             ProcessGetBlockData(config, pfrom, peer, inv);
         }
         // else: If the first item on the queue is an unknown type, we erase it
         // and continue processing the queue on the next call.
     }
 
     peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
 
     if (!vNotFound.empty()) {
         // Let the peer know that we didn't find what it asked for, so it
         // doesn't have to wait around forever. SPV clients care about this
         // message: it's needed when they are recursively walking the
         // dependencies of relevant unconfirmed transactions. SPV clients want
         // to do that because they want to know about (and store and rebroadcast
         // and risk analyze) the dependencies of transactions relevant to them,
         // without having to download the entire memory pool. Also, other nodes
         // can use these messages to automatically request a transaction from
        // some other peer that announced it, and stop waiting for us to
         // respond. In normal operation, we often send NOTFOUND messages for
         // parents of transactions that we relay; if a peer is missing a parent,
         // they may assume we have them and request the parents from us.
         m_connman.PushMessage(&pfrom,
                               msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
     }
 }
 
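/**
 * Reply to a getblocktxn request with a blocktxn message containing the
 * requested transactions from the given block, or penalize the peer if any
 * requested index is out of bounds.
 */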
 void PeerManagerImpl::SendBlockTransactions(
     CNode &pfrom, const CBlock &block, const BlockTransactionsRequest &req) {
     BlockTransactions resp(req);
     for (size_t i = 0; i < req.indices.size(); i++) {
         if (req.indices[i] >= block.vtx.size()) {
             Misbehaving(pfrom, 100,
                         "getblocktxn with out-of-bounds tx indices");
             return;
         }
         resp.txn[i] = block.vtx[req.indices[i]];
     }
     LOCK(cs_main);
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
     int nSendFlags = 0;
     m_connman.PushMessage(
         &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
 }
 
 void PeerManagerImpl::ProcessHeadersMessage(
     const Config &config, CNode &pfrom, const Peer &peer,
     const std::vector<CBlockHeader> &headers, bool via_compact_block) {
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
     size_t nCount = headers.size();
 
     if (nCount == 0) {
        // Nothing interesting. Stop asking this peer for more headers.
         return;
     }
 
     bool received_new_header = false;
     const CBlockIndex *pindexLast = nullptr;
     {
         LOCK(cs_main);
         CNodeState *nodestate = State(pfrom.GetId());
 
         // If this looks like it could be a block announcement (nCount <
         // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
         // don't connect:
         // - Send a getheaders message in response to try to connect the chain.
         // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
         // don't connect before giving DoS points
         // - Once a headers message is received that is valid and does connect,
         // nUnconnectingHeaders gets reset back to 0.
         if (!m_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) &&
             nCount < MAX_BLOCKS_TO_ANNOUNCE) {
             nodestate->nUnconnectingHeaders++;
             m_connman.PushMessage(
                 &pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
                                       m_chainman.ActiveChain().GetLocator(
                                           m_chainman.m_best_header),
                                       uint256()));
             LogPrint(
                 BCLog::NET,
                 "received header %s: missing prev block %s, sending getheaders "
                 "(%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                 headers[0].GetHash().ToString(),
                 headers[0].hashPrevBlock.ToString(),
                 m_chainman.m_best_header->nHeight, pfrom.GetId(),
                 nodestate->nUnconnectingHeaders);
             // Set hashLastUnknownBlock for this peer, so that if we eventually
             // get the headers - even from a different peer - we can use this
             // peer to download.
             UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash());
 
             if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS ==
                 0) {
                 // The peer is sending us many headers we can't connect.
                 Misbehaving(pfrom, 20,
                             strprintf("%d non-connecting headers",
                                       nodestate->nUnconnectingHeaders));
             }
             return;
         }
 
         BlockHash hashLastBlock;
         for (const CBlockHeader &header : headers) {
             if (!hashLastBlock.IsNull() &&
                 header.hashPrevBlock != hashLastBlock) {
                 Misbehaving(pfrom, 20, "non-continuous headers sequence");
                 return;
             }
             hashLastBlock = header.GetHash();
         }
 
         // If we don't have the last header, then they'll have given us
         // something new (if these headers are valid).
         if (!m_chainman.m_blockman.LookupBlockIndex(hashLastBlock)) {
             received_new_header = true;
         }
     }
 
     BlockValidationState state;
     if (!m_chainman.ProcessNewBlockHeaders(config, headers, state,
                                            &pindexLast)) {
         if (state.IsInvalid()) {
             MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
                                     "invalid header received");
             return;
         }
     }
 
     {
         LOCK(cs_main);
         CNodeState *nodestate = State(pfrom.GetId());
         if (nodestate->nUnconnectingHeaders > 0) {
             LogPrint(BCLog::NET,
                      "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n",
                      pfrom.GetId(), nodestate->nUnconnectingHeaders);
         }
         nodestate->nUnconnectingHeaders = 0;
 
         assert(pindexLast);
         UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
 
         // From here, pindexBestKnownBlock should be guaranteed to be non-null,
         // because it is set in UpdateBlockAvailability. Some nullptr checks are
         // still present, however, as belt-and-suspenders.
 
         if (received_new_header &&
             pindexLast->nChainWork >
                 m_chainman.ActiveChain().Tip()->nChainWork) {
             nodestate->m_last_block_announcement = GetTime();
         }
 
         if (nCount == MAX_HEADERS_RESULTS) {
             // Headers message had its maximum size; the peer may have more
             // headers.
             // TODO: optimize: if pindexLast is an ancestor of
             // m_chainman.ActiveChain().Tip or m_chainman.m_best_header,
             // continue from there instead.
             LogPrint(
                 BCLog::NET,
                 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
                 pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
             m_connman.PushMessage(
                 &pfrom,
                 msgMaker.Make(NetMsgType::GETHEADERS,
                               m_chainman.ActiveChain().GetLocator(pindexLast),
                               uint256()));
         }
 
         // If this set of headers is valid and ends in a block with at least as
         // much work as our tip, download as much as possible.
         if (CanDirectFetch() && pindexLast->IsValid(BlockValidity::TREE) &&
             m_chainman.ActiveChain().Tip()->nChainWork <=
                 pindexLast->nChainWork) {
             std::vector<const CBlockIndex *> vToFetch;
             const CBlockIndex *pindexWalk = pindexLast;
             // Calculate all the blocks we'd need to switch to pindexLast, up to
             // a limit.
             while (pindexWalk &&
                    !m_chainman.ActiveChain().Contains(pindexWalk) &&
                    vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                 if (!pindexWalk->nStatus.hasData() &&
                     !IsBlockRequested(pindexWalk->GetBlockHash())) {
                     // We don't have this block, and it's not yet in flight.
                     vToFetch.push_back(pindexWalk);
                 }
                 pindexWalk = pindexWalk->pprev;
             }
             // If pindexWalk still isn't on our main chain, we're looking at a
             // very large reorg at a time we think we're close to caught up to
             // the main chain -- this shouldn't really happen. Bail out on the
             // direct fetch and rely on parallel download instead.
             if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
                 LogPrint(
                     BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                     pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
             } else {
                 std::vector<CInv> vGetData;
                 // Download as much as possible, from earliest to latest.
                 for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
                     if (nodestate->nBlocksInFlight >=
                         MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                         // Can't download any more from this peer
                         break;
                     }
                     vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                     BlockRequested(config, pfrom.GetId(), *pindex);
                    LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                              pindex->GetBlockHash().ToString(), pfrom.GetId());
                 }
                 if (vGetData.size() > 1) {
                     LogPrint(BCLog::NET,
                              "Downloading blocks toward %s (%d) via headers "
                              "direct fetch\n",
                              pindexLast->GetBlockHash().ToString(),
                              pindexLast->nHeight);
                 }
                 if (vGetData.size() > 0) {
                     if (!m_ignore_incoming_txs &&
                         nodestate->m_provides_cmpctblocks &&
                         vGetData.size() == 1 && mapBlocksInFlight.size() == 1 &&
                         pindexLast->pprev->IsValid(BlockValidity::CHAIN)) {
                         // In any case, we want to download using a compact
                         // block, not a regular one.
                         vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                     }
                     m_connman.PushMessage(
                         &pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                 }
             }
         }
         // If we're in IBD, we want outbound peers that will serve us a useful
         // chain. Disconnect peers that are on chains with insufficient work.
         if (m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
             nCount != MAX_HEADERS_RESULTS) {
             // When nCount < MAX_HEADERS_RESULTS, we know we have no more
             // headers to fetch from this peer.
             if (nodestate->pindexBestKnownBlock &&
                 nodestate->pindexBestKnownBlock->nChainWork <
                     nMinimumChainWork) {
                 // This peer has too little work on their headers chain to help
                 // us sync -- disconnect if it is an outbound disconnection
                 // candidate.
                 // Note: We compare their tip to nMinimumChainWork (rather than
                 // m_chainman.ActiveChain().Tip()) because we won't start block
                 // download until we have a headers chain that has at least
                 // nMinimumChainWork, even if a peer has a chain past our tip,
                 // as an anti-DoS measure.
                 if (pfrom.IsOutboundOrBlockRelayConn()) {
                     LogPrintf("Disconnecting outbound peer %d -- headers "
                               "chain has insufficient work\n",
                               pfrom.GetId());
                     pfrom.fDisconnect = true;
                 }
             }
         }
 
         // If this is an outbound full-relay peer, check to see if we should
         // protect it from the bad/lagging chain logic.
         // Note that outbound block-relay peers are excluded from this
         // protection, and thus always subject to eviction under the bad/lagging
         // chain logic.
         // See ChainSyncTimeoutState.
         if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() &&
             nodestate->pindexBestKnownBlock != nullptr) {
             if (m_outbound_peers_with_protect_from_disconnect <
                     MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT &&
                 nodestate->pindexBestKnownBlock->nChainWork >=
                     m_chainman.ActiveChain().Tip()->nChainWork &&
                 !nodestate->m_chain_sync.m_protect) {
                 LogPrint(BCLog::NET,
                          "Protecting outbound peer=%d from eviction\n",
                          pfrom.GetId());
                 nodestate->m_chain_sync.m_protect = true;
                 ++m_outbound_peers_with_protect_from_disconnect;
             }
         }
     }
 }
 
 /**
  * Reconsider orphan transactions after a parent has been accepted to the
  * mempool.
  *
  * @param[in,out]  orphan_work_set  The set of orphan transactions to
  *    reconsider. Generally only one orphan will be reconsidered on each call of
  *    this function. This set may be added to if accepting an orphan causes its
  *    children to be reconsidered.
  */
 void PeerManagerImpl::ProcessOrphanTx(const Config &config,
                                       std::set<TxId> &orphan_work_set) {
     AssertLockHeld(cs_main);
     AssertLockHeld(g_cs_orphans);
     while (!orphan_work_set.empty()) {
         const TxId orphanTxId = *orphan_work_set.begin();
         orphan_work_set.erase(orphan_work_set.begin());
 
         const auto [porphanTx, from_peer] = m_orphanage.GetTx(orphanTxId);
         if (porphanTx == nullptr) {
             continue;
         }
 
         const MempoolAcceptResult result =
             m_chainman.ProcessTransaction(porphanTx);
         const TxValidationState &state = result.m_state;
         if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
             LogPrint(BCLog::MEMPOOL, "   accepted orphan tx %s\n",
                      orphanTxId.ToString());
             RelayTransaction(orphanTxId);
             m_orphanage.AddChildrenToWorkSet(*porphanTx, orphan_work_set);
             m_orphanage.EraseTx(orphanTxId);
             break;
         } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
             if (state.IsInvalid()) {
                 LogPrint(BCLog::MEMPOOL,
                          "   invalid orphan tx %s from peer=%d. %s\n",
                          orphanTxId.ToString(), from_peer, state.ToString());
                 // Punish peer that gave us an invalid orphan tx
                 MaybePunishNodeForTx(from_peer, state);
             }
             // Has inputs but not accepted to mempool
             // Probably non-standard or insufficient fee
             LogPrint(BCLog::MEMPOOL, "   removed orphan tx %s\n",
                      orphanTxId.ToString());
 
             m_recent_rejects.insert(orphanTxId);
 
             m_orphanage.EraseTx(orphanTxId);
             break;
         }
     }
 }
 
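/**
 * Validation logic shared by the getcfilters, getcfheaders and getcfcheckpt
 * handlers: check that the filter type is served, that the stop block exists
 * and may be requested, and that the height range does not exceed
 * max_height_diff. May disconnect the peer on a bad request. On success,
 * stop_index and filter_index are filled in.
 */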
 bool PeerManagerImpl::PrepareBlockFilterRequest(
     CNode &node, Peer &peer, BlockFilterType filter_type, uint32_t start_height,
     const BlockHash &stop_hash, uint32_t max_height_diff,
     const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) {
     const bool supported_filter_type =
         (filter_type == BlockFilterType::BASIC &&
          (peer.m_our_services & NODE_COMPACT_FILTERS));
     if (!supported_filter_type) {
         LogPrint(BCLog::NET,
                  "peer %d requested unsupported block filter type: %d\n",
                  node.GetId(), static_cast<uint8_t>(filter_type));
         node.fDisconnect = true;
         return false;
     }
 
     {
         LOCK(cs_main);
         stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
 
         // Check that the stop block exists and the peer would be allowed to
         // fetch it.
         if (!stop_index || !BlockRequestAllowed(stop_index)) {
             LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
                      node.GetId(), stop_hash.ToString());
             node.fDisconnect = true;
             return false;
         }
     }
 
     uint32_t stop_height = stop_index->nHeight;
     if (start_height > stop_height) {
         LogPrint(
             BCLog::NET,
             "peer %d sent invalid getcfilters/getcfheaders with " /* Continued
                                                                    */
             "start height %d and stop height %d\n",
             node.GetId(), start_height, stop_height);
         node.fDisconnect = true;
         return false;
     }
     if (stop_height - start_height >= max_height_diff) {
         LogPrint(BCLog::NET,
                  "peer %d requested too many cfilters/cfheaders: %d / %d\n",
                  node.GetId(), stop_height - start_height + 1, max_height_diff);
         node.fDisconnect = true;
         return false;
     }
 
     filter_index = GetBlockFilterIndex(filter_type);
     if (!filter_index) {
         LogPrint(BCLog::NET, "Filter index for supported type %s not found\n",
                  BlockFilterTypeName(filter_type));
         return false;
     }
 
     return true;
 }
 
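/**
 * Handle a getcfilters request: look up the filters from start_height up to
 * the stop block and send one cfilter message per filter.
 */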
 void PeerManagerImpl::ProcessGetCFilters(CNode &node, Peer &peer,
                                          CDataStream &vRecv) {
     uint8_t filter_type_ser;
     uint32_t start_height;
     BlockHash stop_hash;
 
     vRecv >> filter_type_ser >> start_height >> stop_hash;
 
     const BlockFilterType filter_type =
         static_cast<BlockFilterType>(filter_type_ser);
 
     const CBlockIndex *stop_index;
     BlockFilterIndex *filter_index;
     if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
                                    stop_hash, MAX_GETCFILTERS_SIZE, stop_index,
                                    filter_index)) {
         return;
     }
 
     std::vector<BlockFilter> filters;
     if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
         LogPrint(BCLog::NET,
                  "Failed to find block filter in index: filter_type=%s, "
                  "start_height=%d, stop_hash=%s\n",
                  BlockFilterTypeName(filter_type), start_height,
                  stop_hash.ToString());
         return;
     }
 
     for (const auto &filter : filters) {
         CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
                                     .Make(NetMsgType::CFILTER, filter);
         m_connman.PushMessage(&node, std::move(msg));
     }
 }
 
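/**
 * Handle a getcfheaders request: send a single cfheaders message containing
 * the previous filter header and the filter hashes for the requested range.
 */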
 void PeerManagerImpl::ProcessGetCFHeaders(CNode &node, Peer &peer,
                                           CDataStream &vRecv) {
     uint8_t filter_type_ser;
     uint32_t start_height;
     BlockHash stop_hash;
 
     vRecv >> filter_type_ser >> start_height >> stop_hash;
 
     const BlockFilterType filter_type =
         static_cast<BlockFilterType>(filter_type_ser);
 
     const CBlockIndex *stop_index;
     BlockFilterIndex *filter_index;
     if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
                                    stop_hash, MAX_GETCFHEADERS_SIZE, stop_index,
                                    filter_index)) {
         return;
     }
 
     uint256 prev_header;
     if (start_height > 0) {
         const CBlockIndex *const prev_block =
             stop_index->GetAncestor(static_cast<int>(start_height - 1));
         if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
             LogPrint(BCLog::NET,
                      "Failed to find block filter header in index: "
                      "filter_type=%s, block_hash=%s\n",
                      BlockFilterTypeName(filter_type),
                      prev_block->GetBlockHash().ToString());
             return;
         }
     }
 
     std::vector<uint256> filter_hashes;
     if (!filter_index->LookupFilterHashRange(start_height, stop_index,
                                              filter_hashes)) {
         LogPrint(BCLog::NET,
                  "Failed to find block filter hashes in index: filter_type=%s, "
                  "start_height=%d, stop_hash=%s\n",
                  BlockFilterTypeName(filter_type), start_height,
                  stop_hash.ToString());
         return;
     }
 
     CSerializedNetMsg msg =
         CNetMsgMaker(node.GetCommonVersion())
             .Make(NetMsgType::CFHEADERS, filter_type_ser,
                   stop_index->GetBlockHash(), prev_header, filter_hashes);
     m_connman.PushMessage(&node, std::move(msg));
 }
 
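/**
 * Handle a getcfcheckpt request: send the filter headers at every
 * CFCHECKPT_INTERVAL-th block up to the stop block.
 */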
 void PeerManagerImpl::ProcessGetCFCheckPt(CNode &node, Peer &peer,
                                           CDataStream &vRecv) {
     uint8_t filter_type_ser;
     BlockHash stop_hash;
 
     vRecv >> filter_type_ser >> stop_hash;
 
     const BlockFilterType filter_type =
         static_cast<BlockFilterType>(filter_type_ser);
 
     const CBlockIndex *stop_index;
     BlockFilterIndex *filter_index;
     if (!PrepareBlockFilterRequest(
             node, peer, filter_type, /*start_height=*/0, stop_hash,
             /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
             stop_index, filter_index)) {
         return;
     }
 
     std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
 
     // Populate headers.
     const CBlockIndex *block_index = stop_index;
     for (int i = headers.size() - 1; i >= 0; i--) {
         int height = (i + 1) * CFCHECKPT_INTERVAL;
         block_index = block_index->GetAncestor(height);
 
         if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
             LogPrint(BCLog::NET,
                      "Failed to find block filter header in index: "
                      "filter_type=%s, block_hash=%s\n",
                      BlockFilterTypeName(filter_type),
                      block_index->GetBlockHash().ToString());
             return;
         }
     }
 
     CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
                                 .Make(NetMsgType::CFCHECKPT, filter_type_ser,
                                       stop_index->GetBlockHash(), headers);
     m_connman.PushMessage(&node, std::move(msg));
 }
 
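 /**
  * Whether the message type belongs to the avalanche protocol. Such messages
  * are ignored below unless avalanche is initialized and enabled.
  */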
 bool IsAvalancheMessageType(const std::string &msg_type) {
     return msg_type == NetMsgType::AVAHELLO ||
            msg_type == NetMsgType::AVAPOLL ||
            msg_type == NetMsgType::AVARESPONSE ||
            msg_type == NetMsgType::AVAPROOF ||
            msg_type == NetMsgType::GETAVAADDR ||
            msg_type == NetMsgType::GETAVAPROOFS ||
            msg_type == NetMsgType::AVAPROOFS ||
            msg_type == NetMsgType::AVAPROOFSREQ;
 }
 
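 /**
  * Decide a response for an Avalanche poll about the given block.
  *
  * @param[in] hash   The hash of the block being polled for
  * @return           Our current vote for the block
  */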
 uint32_t
 PeerManagerImpl::GetAvalancheVoteForBlock(const BlockHash &hash) const {
     AssertLockHeld(cs_main);
 
     const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
 
     // Unknown block.
     if (!pindex) {
         return -1;
     }
 
     // Invalid block
     if (pindex->nStatus.isInvalid()) {
         return 1;
     }
 
     // Parked block
     if (pindex->nStatus.isOnParkedChain()) {
         return 2;
     }
 
     const CBlockIndex *pindexTip = m_chainman.ActiveChain().Tip();
     const CBlockIndex *pindexFork = LastCommonAncestor(pindex, pindexTip);
 
     // Active block.
     if (pindex == pindexFork) {
         return 0;
     }
 
     // Fork block.
     if (pindexFork != pindexTip) {
         return 3;
     }
 
     // Missing block data.
     if (!pindex->nStatus.hasData()) {
         return -2;
     }
 
     // This block is built on top of the tip and we have its data; it
     // is pending connection or rejection.
     return -3;
 }
 
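 /**
  * Decide a response for an Avalanche poll about the given transaction.
  *
  * @param[in] id   The id of the transaction being polled for
  * @return         Our current vote for the transaction
  */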
 uint32_t PeerManagerImpl::GetAvalancheVoteForTx(const TxId &id) const {
     // Accepted in mempool, or in a recent block
     if (m_mempool.exists(id) ||
         WITH_LOCK(m_recent_confirmed_transactions_mutex,
                   return m_recent_confirmed_transactions.contains(id))) {
         return 0;
     }
 
     // Invalid tx
     if (m_recent_rejects.contains(id)) {
         return 1;
     }
 
     // Orphan tx
     if (m_orphanage.HaveTx(id)) {
         return 2;
     }
 
     // Unknown tx
     return -1;
 }
 
 /**
  * Decide a response for an Avalanche poll about the given proof.
  *
  * @param[in] id   The id of the proof being polled for
  * @return         Our current vote for the proof
  */
 static uint32_t getAvalancheVoteForProof(const avalanche::ProofId &id) {
     assert(g_avalanche);
 
     return g_avalanche->withPeerManager([&id](avalanche::PeerManager &pm) {
         // Rejected proof
         if (pm.isInvalid(id)) {
             return 1;
         }
 
         // The proof is actively bound to a peer
         if (pm.isBoundToPeer(id)) {
             return 0;
         }
 
         // Unknown proof
         if (!pm.exists(id)) {
             return -1;
         }
 
         // Immature proof
         if (pm.isImmature(id)) {
             return 2;
         }
 
         // Not immature, but in conflict with an actively bound proof
         if (pm.isInConflictingPool(id)) {
             return 3;
         }
 
         // The proof is known, not rejected, not immature, not a conflict, but
         // for some reason unbound. This should not happen if the above pools
         // are managed correctly, but added for robustness.
         return -2;
     });
 }
 
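 /**
  * Hand a block received from a peer to the chainstate manager for
  * validation. If it turns out to be a new block, record the time on the
  * sending node; otherwise drop the entry remembering which peer provided it.
  */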
 void PeerManagerImpl::ProcessBlock(const Config &config, CNode &node,
                                    const std::shared_ptr<const CBlock> &block,
                                    bool force_processing) {
     bool new_block{false};
     m_chainman.ProcessNewBlock(config, block, force_processing, &new_block);
     if (new_block) {
         node.m_last_block_time = GetTime<std::chrono::seconds>();
     } else {
         LOCK(cs_main);
         mapBlockSource.erase(block->GetHash());
     }
 }
 
 void PeerManagerImpl::ProcessMessage(
     const Config &config, CNode &pfrom, const std::string &msg_type,
     CDataStream &vRecv, const std::chrono::microseconds time_received,
     const std::atomic<bool> &interruptMsgProc) {
     LogPrint(BCLog::NETDEBUG, "received: %s (%u bytes) peer=%d\n",
              SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
 
     PeerRef peer = GetPeerRef(pfrom.GetId());
     if (peer == nullptr) {
         return;
     }
 
     if (IsAvalancheMessageType(msg_type)) {
         if (!g_avalanche) {
             LogPrint(BCLog::AVALANCHE,
                      "Avalanche is not initialized, ignoring %s message\n",
                      msg_type);
             return;
         }
 
         if (!isAvalancheEnabled(gArgs)) {
             // If avalanche is not enabled, ignore avalanche messages
             return;
         }
     }
 
     if (msg_type == NetMsgType::VERSION) {
         // Each connection can only send one version message
         if (pfrom.nVersion != 0) {
             Misbehaving(pfrom, 1, "redundant version message");
             return;
         }
 
         int64_t nTime;
         CService addrMe;
         uint64_t nNonce = 1;
         ServiceFlags nServices;
         int nVersion;
         std::string cleanSubVer;
         int starting_height = -1;
         bool fRelay = true;
         uint64_t nExtraEntropy = 1;
 
         vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
         if (nTime < 0) {
             nTime = 0;
         }
         // Ignore the addrMe service bits sent by the peer
         vRecv.ignore(8);
         vRecv >> addrMe;
         if (!pfrom.IsInboundConn()) {
             m_addrman.SetServices(pfrom.addr, nServices);
         }
         if (pfrom.ExpectServicesFromConn() &&
             !HasAllDesirableServiceFlags(nServices)) {
             LogPrint(BCLog::NET,
                      "peer=%d does not offer the expected services "
                      "(%08x offered, %08x expected); disconnecting\n",
                      pfrom.GetId(), nServices,
                      GetDesirableServiceFlags(nServices));
             pfrom.fDisconnect = true;
             return;
         }
 
         if (pfrom.IsAvalancheOutboundConnection() &&
             !(nServices & NODE_AVALANCHE)) {
             LogPrint(
                 BCLog::AVALANCHE,
                 "peer=%d does not offer the avalanche service; disconnecting\n",
                 pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
 
         if (nVersion < MIN_PEER_PROTO_VERSION) {
             // disconnect from peers older than this proto version
             LogPrint(BCLog::NET,
                      "peer=%d using obsolete version %i; disconnecting\n",
                      pfrom.GetId(), nVersion);
             pfrom.fDisconnect = true;
             return;
         }
 
         if (!vRecv.empty()) {
             // The version message includes information about the sending node
             // which we don't use:
             //   - 8 bytes (service bits)
             //   - 16 bytes (ipv6 address)
             //   - 2 bytes (port)
             vRecv.ignore(26);
             vRecv >> nNonce;
         }
         if (!vRecv.empty()) {
             std::string strSubVer;
             vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
             cleanSubVer = SanitizeString(strSubVer);
         }
         if (!vRecv.empty()) {
             vRecv >> starting_height;
         }
         if (!vRecv.empty()) {
             vRecv >> fRelay;
         }
         if (!vRecv.empty()) {
             vRecv >> nExtraEntropy;
         }
         // Disconnect if we connected to ourself
         if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
             LogPrintf("connected to self at %s, disconnecting\n",
                       pfrom.addr.ToString());
             pfrom.fDisconnect = true;
             return;
         }
 
         if (pfrom.IsInboundConn() && addrMe.IsRoutable()) {
             SeenLocal(addrMe);
         }
 
         // Inbound peers send us their version message when they connect.
         // We send our version message in response.
         if (pfrom.IsInboundConn()) {
             PushNodeVersion(config, pfrom, *peer);
         }
 
         // Change version
         const int greatest_common_version =
             std::min(nVersion, PROTOCOL_VERSION);
         pfrom.SetCommonVersion(greatest_common_version);
         pfrom.nVersion = nVersion;
 
         const CNetMsgMaker msg_maker(greatest_common_version);
 
         m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
 
         // Signal ADDRv2 support (BIP155).
         m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
 
         pfrom.m_has_all_wanted_services =
             HasAllDesirableServiceFlags(nServices);
         peer->m_their_services = nServices;
         pfrom.SetAddrLocal(addrMe);
         {
             LOCK(pfrom.m_subver_mutex);
             pfrom.cleanSubVer = cleanSubVer;
         }
         peer->m_starting_height = starting_height;
 
         // We only initialize the m_tx_relay data structure if:
         // - this isn't an outbound block-relay-only connection; and
         // - fRelay=true or we're offering NODE_BLOOM to this peer
         //   (NODE_BLOOM means that the peer may turn on tx relay later)
         if (!pfrom.IsBlockOnlyConn() &&
             (fRelay || (peer->m_our_services & NODE_BLOOM))) {
             auto *const tx_relay = peer->SetTxRelay();
             {
                 LOCK(tx_relay->m_bloom_filter_mutex);
                 // set to true after we get the first filter* message
                 tx_relay->m_relay_txs = fRelay;
             }
             if (fRelay) {
                 pfrom.m_relays_txs = true;
             }
         }
 
         pfrom.nRemoteHostNonce = nNonce;
         pfrom.nRemoteExtraEntropy = nExtraEntropy;
 
         // Potentially mark this peer as a preferred download peer.
         {
             LOCK(cs_main);
             CNodeState *state = State(pfrom.GetId());
             state->fPreferredDownload =
                 (!pfrom.IsInboundConn() ||
                  pfrom.HasPermission(NetPermissionFlags::NoBan)) &&
                 !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
             m_num_preferred_download_peers += state->fPreferredDownload;
         }
 
         // Self advertisement & GETADDR logic
         if (!pfrom.IsInboundConn() && SetupAddressRelay(pfrom, *peer)) {
             // For outbound peers, we try to relay our address (so that other
             // nodes can try to find us more quickly, as we have no guarantee
             // that an outbound peer is even aware of how to reach us) and do a
             // one-time address fetch (to help populate/update our addrman). If
             // we're starting up for the first time, our addrman may be pretty
             // empty and no one will know who we are, so these mechanisms are
             // important to help us connect to the network.
             //
             // We skip this for block-relay-only peers. We want to avoid
             // potentially leaking addr information and we do not want to
             // indicate to the peer that we will participate in addr relay.
             if (fListen &&
                 !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                 CAddress addr{GetLocalAddress(pfrom.addr), peer->m_our_services,
                               (uint32_t)GetAdjustedTime()};
                 FastRandomContext insecure_rand;
                 if (addr.IsRoutable()) {
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     PushAddress(*peer, addr, insecure_rand);
                 } else if (IsPeerAddrLocalGood(&pfrom)) {
                     // Override just the address with whatever the peer sees us
                     // as. Leave the port in addr as it was returned by
                     // GetLocalAddress() above, as this is an outbound
                     // connection and the peer cannot observe our listening
                     // port.
                     addr.SetIP(addrMe);
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     PushAddress(*peer, addr, insecure_rand);
                 }
             }
 
             // Get recent addresses
             m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version)
                                               .Make(NetMsgType::GETADDR));
             peer->m_getaddr_sent = true;
             // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND
             // addresses in response (bypassing the
             // MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
             WITH_LOCK(peer->m_addr_token_bucket_mutex,
                       peer->m_addr_token_bucket += GetMaxAddrToSend());
         }
 
         if (!pfrom.IsInboundConn()) {
             // For non-inbound connections, we update the addrman to record
             // connection success so that addrman will have an up-to-date
             // notion of which peers are online and available.
             //
             // While we strive to not leak information about block-relay-only
             // connections via the addrman, not moving an address to the tried
             // table is also potentially detrimental because new-table entries
             // are subject to eviction in the event of addrman collisions.  We
             // mitigate the information-leak by never calling
             // AddrMan::Connected() on block-relay-only peers; see
             // FinalizeNode().
             //
             // This moves an address from New to Tried table in Addrman,
             // resolves tried-table collisions, etc.
             m_addrman.Good(pfrom.addr);
         }
 
         std::string remoteAddr;
         if (fLogIPs) {
             remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
         }
 
         LogPrint(BCLog::NET,
                  "receive version message: [%s] %s: version %d, blocks=%d, "
                  "us=%s, txrelay=%d, peer=%d%s\n",
                  pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion,
                  peer->m_starting_height, addrMe.ToString(), fRelay,
                  pfrom.GetId(), remoteAddr);
 
         int64_t currentTime = GetTime();
         int64_t nTimeOffset = nTime - currentTime;
         pfrom.nTimeOffset = nTimeOffset;
         if (nTime < int64_t(m_chainparams.GenesisBlock().nTime)) {
             // Ignore time offsets that are improbable (before the Genesis
             // block) and may underflow our adjusted time.
             Misbehaving(pfrom, 20,
                         "Ignoring invalid timestamp in version message");
         } else if (!pfrom.IsInboundConn()) {
            // Don't use timedata samples from inbound peers, so that it is
            // harder for others to tamper with our adjusted time.
             AddTimeData(pfrom.addr, nTimeOffset);
         }
 
         // Feeler connections exist only to verify if address is online.
         if (pfrom.IsFeelerConn()) {
             LogPrint(BCLog::NET,
                      "feeler connection completed peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
         }
         return;
     }
 
     if (pfrom.nVersion == 0) {
         // Must have a version message before anything else
         Misbehaving(pfrom, 10, "non-version message before version handshake");
         return;
     }
 
     // At this point, the outgoing message serialization version can't change.
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
 
     if (msg_type == NetMsgType::VERACK) {
         if (pfrom.fSuccessfullyConnected) {
             LogPrint(BCLog::NET,
                      "ignoring redundant verack message from peer=%d\n",
                      pfrom.GetId());
             return;
         }
 
         if (!pfrom.IsInboundConn()) {
             LogPrintf(
                 "New outbound peer connected: version: %d, blocks=%d, "
                 "peer=%d%s (%s)\n",
                 pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(),
                 (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString())
                          : ""),
                 pfrom.ConnectionTypeAsString());
         }
 
         if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) {
             // Tell our peer we prefer to receive headers rather than inv's
             // We send this to non-NODE NETWORK peers as well, because even
             // non-NODE NETWORK peers can announce blocks (such as pruning
             // nodes)
             m_connman.PushMessage(&pfrom,
                                   msgMaker.Make(NetMsgType::SENDHEADERS));
         }
 
         if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
             // Tell our peer we are willing to provide version 1
             // cmpctblocks. However, we do not request new block announcements
             // using cmpctblock messages. We send this to non-NODE NETWORK peers
             // as well, because they may wish to request compact blocks from us.
             m_connman.PushMessage(
                 &pfrom,
                 msgMaker.Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false,
                               /*version=*/CMPCTBLOCKS_VERSION));
         }
 
         if (g_avalanche && isAvalancheEnabled(gArgs)) {
             if (g_avalanche->sendHello(&pfrom)) {
                 auto localProof = g_avalanche->getLocalProof();
 
                 if (localProof) {
                     AddKnownProof(*peer, localProof->getId());
                     // Add our proof id to the list of the recently announced
                     // proof INVs to this peer. This is used for filtering
                     // which INVs can be requested for download.
                     LOCK(cs_main);
                     State(pfrom.GetId())
                         ->m_recently_announced_proofs.insert(
                             localProof->getId());
                 }
             }
         }
 
         pfrom.fSuccessfullyConnected = true;
         return;
     }
 
     if (!pfrom.fSuccessfullyConnected) {
         // Must have a verack message before anything else
         Misbehaving(pfrom, 10, "non-verack message before version handshake");
         return;
     }
 
     if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
         int stream_version = vRecv.GetVersion();
         if (msg_type == NetMsgType::ADDRV2) {
             // Add ADDRV2_FORMAT to the version so that the CNetAddr and
             // CAddress unserialize methods know that an address in v2 format is
             // coming.
             stream_version |= ADDRV2_FORMAT;
         }
 
         OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
         std::vector<CAddress> vAddr;
 
         s >> vAddr;
 
         if (!SetupAddressRelay(pfrom, *peer)) {
             LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n",
                      msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
             return;
         }
 
         if (vAddr.size() > GetMaxAddrToSend()) {
             Misbehaving(
                 pfrom, 20,
                 strprintf("%s message size = %u", msg_type, vAddr.size()));
             return;
         }
 
         // Store the new addresses
         std::vector<CAddress> vAddrOk;
         int64_t nNow = GetAdjustedTime();
         int64_t nSince = nNow - 10 * 60;
 
         // Update/increment addr rate limiting bucket.
         const auto current_time = GetTime<std::chrono::microseconds>();
         {
             LOCK(peer->m_addr_token_bucket_mutex);
             if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
                 // Don't increment bucket if it's already full
                 const auto time_diff =
                     std::max(current_time - peer->m_addr_token_timestamp, 0us);
                 const double increment =
                     CountSecondsDouble(time_diff) * MAX_ADDR_RATE_PER_SECOND;
                 peer->m_addr_token_bucket =
                     std::min<double>(peer->m_addr_token_bucket + increment,
                                      MAX_ADDR_PROCESSING_TOKEN_BUCKET);
             }
         }
         peer->m_addr_token_timestamp = current_time;
 
         const bool rate_limited =
             !pfrom.HasPermission(NetPermissionFlags::Addr);
         uint64_t num_proc = 0;
         uint64_t num_rate_limit = 0;
         Shuffle(vAddr.begin(), vAddr.end(), FastRandomContext());
         for (CAddress &addr : vAddr) {
             if (interruptMsgProc) {
                 return;
             }
 
             {
                 LOCK(peer->m_addr_token_bucket_mutex);
                 // Apply rate limiting.
                 if (peer->m_addr_token_bucket < 1.0) {
                     if (rate_limited) {
                         ++num_rate_limit;
                         continue;
                     }
                 } else {
                     peer->m_addr_token_bucket -= 1.0;
                 }
             }
 
             // We only bother storing full nodes, though this may include things
             // which we would not make an outbound connection to, in part
             // because we may make feeler connections to them.
             if (!MayHaveUsefulAddressDB(addr.nServices) &&
                 !HasAllDesirableServiceFlags(addr.nServices)) {
                 continue;
             }
 
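            // Treat addresses with a clearly bogus timestamp (absurdly old
            // or more than 10 minutes in the future) as if they were last
            // seen 5 days ago.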
             if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) {
                 addr.nTime = nNow - 5 * 24 * 60 * 60;
             }
             AddAddressKnown(*peer, addr);
             if (m_banman &&
                 (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
                 // Do not process banned/discouraged addresses beyond
                 // remembering we received them
                 continue;
             }
             ++num_proc;
             bool fReachable = IsReachable(addr);
             if (addr.nTime > nSince && !peer->m_getaddr_sent &&
                 vAddr.size() <= 10 && addr.IsRoutable()) {
                 // Relay to a limited number of other nodes
                 RelayAddress(pfrom.GetId(), addr, fReachable);
             }
             // Do not store addresses outside our network
             if (fReachable) {
                 vAddrOk.push_back(addr);
             }
         }
         peer->m_addr_processed += num_proc;
         peer->m_addr_rate_limited += num_rate_limit;
         LogPrint(BCLog::NET,
                  "Received addr: %u addresses (%u processed, %u rate-limited) "
                  "from peer=%d\n",
                  vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
 
         m_addrman.Add(vAddrOk, pfrom.addr, 2 * 60 * 60);
         if (vAddr.size() < 1000) {
             peer->m_getaddr_sent = false;
         }
 
         // AddrFetch: Require multiple addresses to avoid disconnecting on
         // self-announcements
         if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
             LogPrint(BCLog::NET,
                      "addrfetch connection completed peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
         }
         return;
     }
 
     if (msg_type == NetMsgType::SENDADDRV2) {
         peer->m_wants_addrv2 = true;
         return;
     }
 
     if (msg_type == NetMsgType::SENDHEADERS) {
         LOCK(cs_main);
         State(pfrom.GetId())->fPreferHeaders = true;
         return;
     }
 
     if (msg_type == NetMsgType::SENDCMPCT) {
         bool sendcmpct_hb{false};
         uint64_t sendcmpct_version{0};
         vRecv >> sendcmpct_hb >> sendcmpct_version;
 
         if (sendcmpct_version != CMPCTBLOCKS_VERSION) {
             return;
         }
 
         LOCK(cs_main);
         CNodeState *nodestate = State(pfrom.GetId());
         nodestate->m_provides_cmpctblocks = true;
         nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
         // save whether peer selects us as BIP152 high-bandwidth peer
         // (receiving sendcmpct(1) signals high-bandwidth,
         // sendcmpct(0) low-bandwidth)
         pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
         return;
     }
 
     if (msg_type == NetMsgType::INV) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             Misbehaving(pfrom, 20,
                         strprintf("inv message size = %u", vInv.size()));
             return;
         }
 
         // Reject tx INVs when the -blocksonly setting is enabled, or this is a
         // block-relay-only peer
         bool reject_tx_invs{m_ignore_incoming_txs || pfrom.IsBlockOnlyConn()};
 
         // Allow peers with relay permission to send data other than blocks
         // in blocks only mode
         if (pfrom.HasPermission(NetPermissionFlags::Relay)) {
             reject_tx_invs = false;
         }
 
         const auto current_time{GetTime<std::chrono::microseconds>()};
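        // The last unknown block announced via inv in this message; if set,
        // a single getheaders is sent for it after the whole inv list has
        // been processed.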
         std::optional<BlockHash> best_block;
 
         auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
             LogPrint(BCLog::NET, "got inv: %s  %s peer=%d\n", inv.ToString(),
                      fAlreadyHave ? "have" : "new", pfrom.GetId());
         };
 
         for (CInv &inv : vInv) {
             if (interruptMsgProc) {
                 return;
             }
 
             if (inv.IsMsgBlk()) {
                 LOCK(cs_main);
                 const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
                 logInv(inv, fAlreadyHave);
 
                 const BlockHash hash{inv.hash};
                 UpdateBlockAvailability(pfrom.GetId(), hash);
                 if (!fAlreadyHave && !fImporting && !fReindex &&
                     !IsBlockRequested(hash)) {
                     // Headers-first is the primary method of announcement on
                     // the network. If a node fell back to sending blocks by
                     // inv, it's probably for a re-org. The final block hash
                     // provided should be the highest, so send a getheaders and
                     // then fetch the blocks we need to catch up.
                     best_block = std::move(hash);
                 }
 
                 continue;
             }
 
             if (inv.IsMsgProof()) {
                 const avalanche::ProofId proofid(inv.hash);
                 const bool fAlreadyHave = AlreadyHaveProof(proofid);
                 logInv(inv, fAlreadyHave);
                 AddKnownProof(*peer, proofid);
 
                 if (!fAlreadyHave && g_avalanche && isAvalancheEnabled(gArgs) &&
                     !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                     const bool preferred = isPreferredDownloadPeer(pfrom);
 
                     LOCK(cs_proofrequest);
                     AddProofAnnouncement(pfrom, proofid, current_time,
                                          preferred);
                 }
                 continue;
             }
 
             if (inv.IsMsgTx()) {
                 LOCK(cs_main);
                 const TxId txid(inv.hash);
                 const bool fAlreadyHave = AlreadyHaveTx(txid);
                 logInv(inv, fAlreadyHave);
 
                 AddKnownTx(*peer, txid);
                 if (reject_tx_invs) {
                     LogPrint(BCLog::NET,
                              "transaction (%s) inv sent in violation of "
                              "protocol, disconnecting peer=%d\n",
                              txid.ToString(), pfrom.GetId());
                     pfrom.fDisconnect = true;
                     return;
                 } else if (!fAlreadyHave && !m_chainman.ActiveChainstate()
                                                  .IsInitialBlockDownload()) {
                     AddTxAnnouncement(pfrom, txid, current_time);
                 }
 
                 continue;
             }
 
             LogPrint(BCLog::NET,
                      "Unknown inv type \"%s\" received from peer=%d\n",
                      inv.ToString(), pfrom.GetId());
         }
 
         if (best_block) {
             LOCK(m_chainman.GetMutex());
             m_connman.PushMessage(
                 &pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
                                       m_chainman.ActiveChain().GetLocator(
                                           m_chainman.m_best_header),
                                       *best_block));
             LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
                      m_chainman.m_best_header->nHeight, best_block->ToString(),
                      pfrom.GetId());
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::GETDATA) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             Misbehaving(pfrom, 20,
                         strprintf("getdata message size = %u", vInv.size()));
             return;
         }
 
         LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
                  vInv.size(), pfrom.GetId());
 
         if (vInv.size() > 0) {
             LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
                      vInv[0].ToString(), pfrom.GetId());
         }
 
         {
             LOCK(peer->m_getdata_requests_mutex);
             peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
                                             vInv.begin(), vInv.end());
             ProcessGetData(config, pfrom, *peer, interruptMsgProc);
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::GETBLOCKS) {
         CBlockLocator locator;
         uint256 hashStop;
         vRecv >> locator >> hashStop;
 
         if (locator.vHave.size() > MAX_LOCATOR_SZ) {
             LogPrint(BCLog::NET,
                      "getblocks locator size %lld > %d, disconnect peer=%d\n",
                      locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
 
         // We might have announced the currently-being-connected tip using a
         // compact block, which resulted in the peer sending a getblocks
         // request, which we would otherwise respond to without the new block.
         // To avoid this situation we simply verify that we are on our best
         // known chain now. This is super overkill, but we handle it better
         // for getheaders requests, and there are no known nodes which support
         // compact blocks but still use getblocks to request blocks.
         {
             std::shared_ptr<const CBlock> a_recent_block;
             {
                 LOCK(m_most_recent_block_mutex);
                 a_recent_block = m_most_recent_block;
             }
             BlockValidationState state;
             if (!m_chainman.ActiveChainstate().ActivateBestChain(
                     config, state, a_recent_block)) {
                 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                          state.ToString());
             }
         }
 
         LOCK(cs_main);
 
         // Find the last block the caller has in the main chain
         const CBlockIndex *pindex =
             m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
 
         // Send the rest of the chain
         if (pindex) {
             pindex = m_chainman.ActiveChain().Next(pindex);
         }
         int nLimit = 500;
         LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
                  pfrom.GetId());
         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
             if (pindex->GetBlockHash() == hashStop) {
                 LogPrint(BCLog::NET, "  getblocks stopping at %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
            // If pruning, don't inv blocks unless we have them on disk and
            // are likely to still have them for some reasonable time window
            // (1 hour) that block relay might require.
             const int nPrunedBlocksLikelyToHave =
                 MIN_BLOCKS_TO_KEEP -
                 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
             if (fPruneMode &&
                 (!pindex->nStatus.hasData() ||
                  pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight -
                                         nPrunedBlocksLikelyToHave)) {
                 LogPrint(
                     BCLog::NET,
                     " getblocks stopping, pruned or too old block at %d %s\n",
                     pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             WITH_LOCK(
                 peer->m_block_inv_mutex,
                 peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
             if (--nLimit <= 0) {
                 // When this block is requested, we'll send an inv that'll
                 // trigger the peer to getblocks the next batch of inventory.
                 LogPrint(BCLog::NET, "  getblocks stopping at limit %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 WITH_LOCK(peer->m_block_inv_mutex, {
                     peer->m_continuation_block = pindex->GetBlockHash();
                 });
                 break;
             }
         }
         return;
     }
 
     if (msg_type == NetMsgType::GETBLOCKTXN) {
         BlockTransactionsRequest req;
         vRecv >> req;
 
         std::shared_ptr<const CBlock> recent_block;
         {
             LOCK(m_most_recent_block_mutex);
             if (m_most_recent_block_hash == req.blockhash) {
                 recent_block = m_most_recent_block;
             }
             // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
         }
         if (recent_block) {
             SendBlockTransactions(pfrom, *recent_block, req);
             return;
         }
 
         {
             LOCK(cs_main);
 
             const CBlockIndex *pindex =
                 m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
             if (!pindex || !pindex->nStatus.hasData()) {
                 LogPrint(
                     BCLog::NET,
                     "Peer %d sent us a getblocktxn for a block we don't have\n",
                     pfrom.GetId());
                 return;
             }
 
             if (pindex->nHeight >=
                 m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
                 CBlock block;
                 bool ret = ReadBlockFromDisk(block, pindex,
                                              m_chainparams.GetConsensus());
                 assert(ret);
 
                 SendBlockTransactions(pfrom, block, req);
                 return;
             }
         }
 
         // If an older block is requested (should never happen in practice,
         // but can happen in tests) send a block response instead of a
         // blocktxn response. Sending a full block response instead of a
         // small blocktxn response is preferable in the case where a peer
         // might maliciously send lots of getblocktxn requests to trigger
         // expensive disk reads, because it will require the peer to
         // actually receive all the data read from disk over the network.
         LogPrint(BCLog::NET,
                  "Peer %d sent us a getblocktxn for a block > %i deep\n",
                  pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
         CInv inv;
         inv.type = MSG_BLOCK;
         inv.hash = req.blockhash;
         WITH_LOCK(peer->m_getdata_requests_mutex,
                   peer->m_getdata_requests.push_back(inv));
         // The message processing loop will go around again (without pausing)
         // and we'll respond then (without cs_main)
         return;
     }
 
     if (msg_type == NetMsgType::GETHEADERS) {
         CBlockLocator locator;
         BlockHash hashStop;
         vRecv >> locator >> hashStop;
 
         if (locator.vHave.size() > MAX_LOCATOR_SZ) {
             LogPrint(BCLog::NET,
                      "getheaders locator size %lld > %d, disconnect peer=%d\n",
                      locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
 
         if (fImporting || fReindex) {
             LogPrint(
                 BCLog::NET,
                 "Ignoring getheaders from peer=%d while importing/reindexing\n",
                 pfrom.GetId());
             return;
         }
 
         LOCK(cs_main);
 
         // Note that if we were to be on a chain that forks from the
         // checkpointed chain, then serving those headers to a peer that has
         // seen the checkpointed chain would cause that peer to disconnect us.
         // Requiring that our chainwork exceed nMinimumChainWork is a protection
         // against being fed a bogus chain when we started up for the first time
         // and getting partitioned off the honest network for serving that chain
         // to others.
         if (m_chainman.ActiveTip() == nullptr ||
             (m_chainman.ActiveTip()->nChainWork < nMinimumChainWork &&
              !pfrom.HasPermission(NetPermissionFlags::Download))) {
             LogPrint(BCLog::NET,
                      "Ignoring getheaders from peer=%d because active chain "
                      "has too little work\n",
                      pfrom.GetId());
             return;
         }
 
         CNodeState *nodestate = State(pfrom.GetId());
         const CBlockIndex *pindex = nullptr;
         if (locator.IsNull()) {
             // If locator is null, return the hashStop block
             pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
             if (!pindex) {
                 return;
             }
 
             if (!BlockRequestAllowed(pindex)) {
                 LogPrint(BCLog::NET,
                          "%s: ignoring request from peer=%i for old block "
                          "header that isn't in the main chain\n",
                          __func__, pfrom.GetId());
                 return;
             }
         } else {
             // Find the last block the caller has in the main chain
             pindex =
                 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
             if (pindex) {
                 pindex = m_chainman.ActiveChain().Next(pindex);
             }
         }
 
         // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
         // count at the end
         std::vector<CBlock> vHeaders;
         int nLimit = MAX_HEADERS_RESULTS;
         LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(),
                  pfrom.GetId());
         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
             vHeaders.push_back(pindex->GetBlockHeader());
             if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
                 break;
             }
         }
         // pindex can be nullptr either if we sent
         // m_chainman.ActiveChain().Tip() OR if our peer has
         // m_chainman.ActiveChain().Tip() (and thus we are sending an empty
         // headers message). In both cases it's safe to update
         // pindexBestHeaderSent to be our tip.
         //
         // It is important that we simply reset the BestHeaderSent value here,
         // and not max(BestHeaderSent, newHeaderSent). We might have announced
         // the currently-being-connected tip using a compact block, which
         // resulted in the peer sending a headers request, which we respond to
         // without the new block. By resetting the BestHeaderSent, we ensure we
         // will re-announce the new block via headers (or compact blocks again)
         // in the SendMessages logic.
         nodestate->pindexBestHeaderSent =
             pindex ? pindex : m_chainman.ActiveChain().Tip();
         m_connman.PushMessage(&pfrom,
                               msgMaker.Make(NetMsgType::HEADERS, vHeaders));
         return;
     }
 
     if (msg_type == NetMsgType::TX) {
         // Stop processing the transaction early if
         // 1) We are in blocks only mode and peer has no relay permission; OR
         // 2) This peer is a block-relay-only peer
         if ((m_ignore_incoming_txs &&
              !pfrom.HasPermission(NetPermissionFlags::Relay)) ||
             pfrom.IsBlockOnlyConn()) {
             LogPrint(BCLog::NET,
                      "transaction sent in violation of protocol peer=%d\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
 
         CTransactionRef ptx;
         vRecv >> ptx;
         const CTransaction &tx = *ptx;
         const TxId &txid = tx.GetId();
         AddKnownTx(*peer, txid);
 
         LOCK2(cs_main, g_cs_orphans);
 
         m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
 
         if (AlreadyHaveTx(txid)) {
             if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) {
                 // Always relay transactions received from peers with
                 // forcerelay permission, even if they were already in the
                 // mempool, allowing the node to function as a gateway for
                 // nodes hidden behind it.
                 if (!m_mempool.exists(tx.GetId())) {
                     LogPrintf("Not relaying non-mempool transaction %s from "
                               "forcerelay peer=%d\n",
                               tx.GetId().ToString(), pfrom.GetId());
                 } else {
                     LogPrintf("Force relaying tx %s from peer=%d\n",
                               tx.GetId().ToString(), pfrom.GetId());
                     RelayTransaction(tx.GetId());
                 }
             }
             return;
         }
 
         const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
         const TxValidationState &state = result.m_state;
 
         if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
             // As this version of the transaction was acceptable, we can forget
             // about any requests for it.
             m_txrequest.ForgetInvId(tx.GetId());
             RelayTransaction(tx.GetId());
             m_orphanage.AddChildrenToWorkSet(tx, peer->m_orphan_work_set);
 
             pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
 
             LogPrint(BCLog::MEMPOOL,
                      "AcceptToMemoryPool: peer=%d: accepted %s "
                      "(poolsz %u txn, %u kB)\n",
                      pfrom.GetId(), tx.GetId().ToString(), m_mempool.size(),
                      m_mempool.DynamicMemoryUsage() / 1000);
 
             // Recursively process any orphan transactions that depended on this
             // one
             ProcessOrphanTx(config, peer->m_orphan_work_set);
         } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
            // It may be the case that the orphan's parents have all been
            // rejected.
             bool fRejectedParents = false;
 
             // Deduplicate parent txids, so that we don't have to loop over
             // the same parent txid more than once down below.
             std::vector<TxId> unique_parents;
             unique_parents.reserve(tx.vin.size());
             for (const CTxIn &txin : tx.vin) {
                 // We start with all parents, and then remove duplicates below.
                 unique_parents.push_back(txin.prevout.GetTxId());
             }
             std::sort(unique_parents.begin(), unique_parents.end());
             unique_parents.erase(
                 std::unique(unique_parents.begin(), unique_parents.end()),
                 unique_parents.end());
             for (const TxId &parent_txid : unique_parents) {
                 if (m_recent_rejects.contains(parent_txid)) {
                     fRejectedParents = true;
                     break;
                 }
             }
             if (!fRejectedParents) {
                 const auto current_time{GetTime<std::chrono::microseconds>()};
 
                 for (const TxId &parent_txid : unique_parents) {
                     // FIXME: MSG_TX should use a TxHash, not a TxId.
                     AddKnownTx(*peer, parent_txid);
                     if (!AlreadyHaveTx(parent_txid)) {
                         AddTxAnnouncement(pfrom, parent_txid, current_time);
                     }
                 }
 
                 if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
                     AddToCompactExtraTransactions(ptx);
                 }
 
                 // Once added to the orphan pool, a tx is considered
                 // AlreadyHave, and we shouldn't request it anymore.
                 m_txrequest.ForgetInvId(tx.GetId());
 
                 // DoS prevention: do not allow m_orphanage to grow
                 // unbounded (see CVE-2012-3789)
                 unsigned int nMaxOrphanTx = (unsigned int)std::max(
                     int64_t(0),
                     gArgs.GetIntArg("-maxorphantx",
                                     DEFAULT_MAX_ORPHAN_TRANSACTIONS));
                 unsigned int nEvicted = m_orphanage.LimitOrphans(nMaxOrphanTx);
                 if (nEvicted > 0) {
                     LogPrint(BCLog::MEMPOOL,
                              "orphanage overflow, removed %u tx\n", nEvicted);
                 }
             } else {
                 LogPrint(BCLog::MEMPOOL,
                          "not keeping orphan with rejected parents %s\n",
                          tx.GetId().ToString());
                 // We will continue to reject this tx since it has rejected
                 // parents so avoid re-requesting it from other peers.
                 m_recent_rejects.insert(tx.GetId());
                 m_txrequest.ForgetInvId(tx.GetId());
             }
         } else {
             m_recent_rejects.insert(tx.GetId());
             m_txrequest.ForgetInvId(tx.GetId());
 
             if (RecursiveDynamicUsage(*ptx) < 100000) {
                 AddToCompactExtraTransactions(ptx);
             }
         }
 
         // If a tx has been detected by m_recent_rejects, we will have reached
         // this point and the tx will have been ignored. Because we haven't
         // submitted the tx to our mempool, we won't have computed a DoS
         // score for it or determined exactly why we consider it invalid.
         //
         // This means we won't penalize any peer subsequently relaying a DoSy
         // tx (even if we penalized the first peer who gave it to us) because
         // we have to account for m_recent_rejects showing false positives. In
         // other words, we shouldn't penalize a peer if we aren't *sure* they
         // submitted a DoSy tx.
         //
         // Note that m_recent_rejects doesn't just record DoSy or invalid
         // transactions, but any tx not accepted by the mempool, which may be
         // due to node policy (vs. consensus). So we can't blanket penalize a
         // peer simply for relaying a tx that our m_recent_rejects has caught,
         // regardless of false positives.
 
         if (state.IsInvalid()) {
             LogPrint(BCLog::MEMPOOLREJ,
                      "%s from peer=%d was not accepted: %s\n",
                      tx.GetHash().ToString(), pfrom.GetId(), state.ToString());
             MaybePunishNodeForTx(pfrom.GetId(), state);
         }
         return;
     }
 
     if (msg_type == NetMsgType::CMPCTBLOCK) {
         // Ignore cmpctblock received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected cmpctblock message received from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         CBlockHeaderAndShortTxIDs cmpctblock;
         try {
             vRecv >> cmpctblock;
         } catch (std::ios_base::failure &e) {
             // This block has non contiguous or overflowing indexes
             Misbehaving(pfrom, 100, "cmpctblock-bad-indexes");
             return;
         }
 
         bool received_new_header = false;
 
         {
             LOCK(cs_main);
 
             if (!m_chainman.m_blockman.LookupBlockIndex(
                     cmpctblock.header.hashPrevBlock)) {
                 // Doesn't connect (or is genesis), instead of DoSing in
                 // AcceptBlockHeader, request deeper headers
                 if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                     m_connman.PushMessage(
                         &pfrom,
                         msgMaker.Make(NetMsgType::GETHEADERS,
                                       m_chainman.ActiveChain().GetLocator(
                                           m_chainman.m_best_header),
                                       uint256()));
                 }
                 return;
             }
 
             if (!m_chainman.m_blockman.LookupBlockIndex(
                     cmpctblock.header.GetHash())) {
                 received_new_header = true;
             }
         }
 
         const CBlockIndex *pindex = nullptr;
         BlockValidationState state;
         if (!m_chainman.ProcessNewBlockHeaders(config, {cmpctblock.header},
                                                state, &pindex)) {
             if (state.IsInvalid()) {
                 MaybePunishNodeForBlock(pfrom.GetId(), state,
                                         /*via_compact_block*/ true,
                                         "invalid header via cmpctblock");
                 return;
             }
         }
 
         // When we succeed in decoding a block's txids from a cmpctblock
         // message we typically jump to the BLOCKTXN handling code, with a
         // dummy (empty) BLOCKTXN message, to re-use the logic there in
         // completing processing of the putative block (without cs_main).
         bool fProcessBLOCKTXN = false;
         CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
 
        // If we end up treating this as a plain headers message, call that
        // as well, without cs_main.
         bool fRevertToHeaderProcessing = false;
 
         // Keep a CBlock for "optimistic" compactblock reconstructions (see
         // below)
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockReconstructed = false;
 
         {
             LOCK2(cs_main, g_cs_orphans);
             // If AcceptBlockHeader returned true, it set pindex
             assert(pindex);
             UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
 
             CNodeState *nodestate = State(pfrom.GetId());
 
             // If this was a new header with more work than our tip, update the
             // peer's last block announcement time
             if (received_new_header &&
                 pindex->nChainWork >
                     m_chainman.ActiveChain().Tip()->nChainWork) {
                 nodestate->m_last_block_announcement = GetTime();
             }
 
             std::map<BlockHash,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator blockInFlightIt =
                     mapBlocksInFlight.find(pindex->GetBlockHash());
             bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
 
             if (pindex->nStatus.hasData()) {
                 // Nothing to do here
                 return;
             }
 
             if (pindex->nChainWork <=
                     m_chainman.ActiveChain()
                         .Tip()
                         ->nChainWork || // We know something better
                 pindex->nTx != 0) {
                 // We had this block at some point, but pruned it
                 if (fAlreadyInFlight) {
                     // We requested this block for some reason, but our mempool
                     // will probably be useless so we just grab the block via
                     // normal getdata.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                     m_connman.PushMessage(
                         &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                 }
                 return;
             }
 
             // If we're not close to tip yet, give up and let parallel block
             // fetch work its magic.
             if (!fAlreadyInFlight && !CanDirectFetch()) {
                 return;
             }
 
             // We want to be a bit conservative just to be extra careful about
             // DoS possibilities in compact block processing...
             if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
                 if ((!fAlreadyInFlight && nodestate->nBlocksInFlight <
                                               MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                     (fAlreadyInFlight &&
                      blockInFlightIt->second.first == pfrom.GetId())) {
                     std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
                     if (!BlockRequested(config, pfrom.GetId(), *pindex,
                                         &queuedBlockIt)) {
                         if (!(*queuedBlockIt)->partialBlock) {
                             (*queuedBlockIt)
                                 ->partialBlock.reset(
                                     new PartiallyDownloadedBlock(config,
                                                                  &m_mempool));
                         } else {
                             // The block was already in flight using compact
                             // blocks from the same peer.
                             LogPrint(BCLog::NET, "Peer sent us compact block "
                                                  "we were already syncing!\n");
                             return;
                         }
                     }
 
                     PartiallyDownloadedBlock &partialBlock =
                         *(*queuedBlockIt)->partialBlock;
                     ReadStatus status =
                         partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status == READ_STATUS_INVALID) {
                         // Reset in-flight state in case Misbehaving does not
                         // result in a disconnect
                         RemoveBlockRequest(pindex->GetBlockHash());
                         Misbehaving(pfrom, 100, "invalid compact block");
                         return;
                     } else if (status == READ_STATUS_FAILED) {
                         // Duplicate txindices, the block is now in-flight, so
                         // just request it.
                         std::vector<CInv> vInv(1);
                         vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                         m_connman.PushMessage(
                             &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                         return;
                     }
 
                     BlockTransactionsRequest req;
                     for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                         if (!partialBlock.IsTxAvailable(i)) {
                             req.indices.push_back(i);
                         }
                     }
                     if (req.indices.empty()) {
                         // Dirty hack to jump to BLOCKTXN code (TODO: move
                         // message handling into their own functions)
                         BlockTransactions txn;
                         txn.blockhash = cmpctblock.header.GetHash();
                         blockTxnMsg << txn;
                         fProcessBLOCKTXN = true;
                     } else {
                         req.blockhash = pindex->GetBlockHash();
                         m_connman.PushMessage(
                             &pfrom,
                             msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
                     }
                 } else {
                     // This block is either already in flight from a different
                     // peer, or this peer has too many blocks outstanding to
                     // download from. Optimistically try to reconstruct anyway
                     // since we might be able to without any round trips.
                     PartiallyDownloadedBlock tempBlock(config, &m_mempool);
                     ReadStatus status =
                         tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status != READ_STATUS_OK) {
                         // TODO: don't ignore failures
                         return;
                     }
                     std::vector<CTransactionRef> dummy;
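                     // FillBlock is given no extra transactions here, so the
                     // reconstruction only succeeds if every transaction was
                     // recovered from our mempool or extra-txn pool.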
                     status = tempBlock.FillBlock(*pblock, dummy);
                     if (status == READ_STATUS_OK) {
                         fBlockReconstructed = true;
                     }
                 }
             } else {
                 if (fAlreadyInFlight) {
                     // We requested this block, but it's far into the future,
                     // so our mempool will probably be useless - request the
                     // block normally.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                     m_connman.PushMessage(
                         &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                     return;
                 } else {
                     // If this was an announce-cmpctblock, we want the same
                     // treatment as a header message.
                     fRevertToHeaderProcessing = true;
                 }
             }
         } // cs_main
 
         if (fProcessBLOCKTXN) {
             return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
                                   blockTxnMsg, time_received, interruptMsgProc);
         }
 
         if (fRevertToHeaderProcessing) {
             // Headers received from HB compact block peers are permitted to be
             // relayed before full validation (see BIP 152), so we don't want to
             // disconnect the peer if the header turns out to be for an invalid
             // block. Note that if a peer tries to build on an invalid chain,
             // that will be detected and the peer will be banned.
             return ProcessHeadersMessage(config, pfrom, *peer,
                                          {cmpctblock.header},
                                          /*via_compact_block=*/true);
         }
 
         if (fBlockReconstructed) {
             // If we got here, we were able to optimistically reconstruct a
             // block that is in flight from some other peer.
             {
                 LOCK(cs_main);
                 mapBlockSource.emplace(pblock->GetHash(),
                                        std::make_pair(pfrom.GetId(), false));
             }
             // Setting force_processing to true means that we bypass some of
             // our anti-DoS protections in AcceptBlock, which filters
             // unrequested blocks that might be trying to waste our resources
             // (eg disk space). Because we only try to reconstruct blocks when
             // we're close to caught up (via the CanDirectFetch() requirement
             // above, combined with the behavior of not requesting blocks until
             // we have a chain with at least nMinimumChainWork), and we ignore
             // compact blocks with less work than our tip, it is safe to treat
             // reconstructed compact blocks as having been requested.
             ProcessBlock(config, pfrom, pblock, /*force_processing=*/true);
             // hold cs_main for CBlockIndex::IsValid()
             LOCK(cs_main);
             if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
                 // Clear download state for this block, which is in process
                 // from some other peer. We do this after calling
                 // ProcessNewBlock so that a malleated cmpctblock announcement
                 // can't be used to interfere with block relay.
                 RemoveBlockRequest(pblock->GetHash());
             }
         }
         return;
     }
 
     if (msg_type == NetMsgType::BLOCKTXN) {
         // Ignore blocktxn received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected blocktxn message received from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         BlockTransactions resp;
         vRecv >> resp;
 
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockRead = false;
         {
             LOCK(cs_main);
 
             std::map<BlockHash,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator it = mapBlocksInFlight.find(resp.blockhash);
             if (it == mapBlocksInFlight.end() ||
                 !it->second.second->partialBlock ||
                 it->second.first != pfrom.GetId()) {
                 LogPrint(BCLog::NET,
                          "Peer %d sent us block transactions for block "
                          "we weren't expecting\n",
                          pfrom.GetId());
                 return;
             }
 
             PartiallyDownloadedBlock &partialBlock =
                 *it->second.second->partialBlock;
             ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
             if (status == READ_STATUS_INVALID) {
                 // Reset in-flight state in case Misbehaving does not result
                 // in a disconnect.
                 RemoveBlockRequest(resp.blockhash);
                 Misbehaving(
                     pfrom, 100,
                     "invalid compact block/non-matching block transactions");
                 return;
             } else if (status == READ_STATUS_FAILED) {
                 // Might have collided, fall back to getdata now :(
                 std::vector<CInv> invs;
                 invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
                 m_connman.PushMessage(&pfrom,
                                       msgMaker.Make(NetMsgType::GETDATA, invs));
             } else {
                 // Block is either okay, or possibly we received
                 // READ_STATUS_CHECKBLOCK_FAILED.
                 // Note that CheckBlock can only fail for one of a few reasons:
                 // 1. bad-proof-of-work (impossible here, because we've already
                 //    accepted the header)
                 // 2. merkleroot doesn't match the transactions given (already
                 //    caught in FillBlock with READ_STATUS_FAILED, so
                 //    impossible here)
                 // 3. the block is otherwise invalid (eg invalid coinbase,
                 //    block is too big, too many sigChecks, etc).
                 // So if CheckBlock failed, #3 is the only possibility.
                 // Under BIP 152, we don't DoS-ban unless proof of work is
                 // invalid (we don't require all the stateless checks to have
                 // been run). This is handled below, so just treat this as
                 // though the block was successfully read, and rely on the
                 // handling in ProcessNewBlock to ensure the block index is
                 // updated, etc.
 
                 // This removes the in-flight entry; the partialBlock
                 // reference above must not be used past this point.
                 RemoveBlockRequest(resp.blockhash);
                 fBlockRead = true;
                 // mapBlockSource is used for potentially punishing peers and
                 // updating which peers send us compact blocks, so the race
                 // between here and cs_main in ProcessNewBlock is fine.
                 // BIP 152 permits peers to relay compact blocks after
                 // validating the header only; we should not punish peers
                 // if the block turns out to be invalid.
                 mapBlockSource.emplace(resp.blockhash,
                                        std::make_pair(pfrom.GetId(), false));
             }
         } // Don't hold cs_main when we call into ProcessNewBlock
         if (fBlockRead) {
             // Since we requested this block (it was in mapBlocksInFlight),
             // force it to be processed, even if it would not be a candidate for
             // new tip (missing previous block, chain not long enough, etc)
             // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
             // disk-space attacks), but this should be safe due to the
             // protections in the compact block handler -- see related comment
             // in compact block optimistic reconstruction handling.
             ProcessBlock(config, pfrom, pblock, /*force_processing=*/true);
         }
         return;
     }
 
     if (msg_type == NetMsgType::HEADERS) {
         // Ignore headers received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected headers message received from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         std::vector<CBlockHeader> headers;
 
         // Bypass the normal CBlock deserialization, as we don't want to risk
         // deserializing 2000 full blocks.
         unsigned int nCount = ReadCompactSize(vRecv);
         if (nCount > MAX_HEADERS_RESULTS) {
             Misbehaving(pfrom, 20,
                         strprintf("too-many-headers: headers message size = %u",
                                   nCount));
             return;
         }
         headers.resize(nCount);
         for (unsigned int n = 0; n < nCount; n++) {
             vRecv >> headers[n];
             // Ignore tx count; assume it is 0.
             ReadCompactSize(vRecv);
         }
 
         return ProcessHeadersMessage(config, pfrom, *peer, headers,
                                      /*via_compact_block=*/false);
     }
 
     if (msg_type == NetMsgType::BLOCK) {
         // Ignore block received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected block message received from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         vRecv >> *pblock;
 
         LogPrint(BCLog::NET, "received block %s peer=%d\n",
                  pblock->GetHash().ToString(), pfrom.GetId());
 
         // Process all blocks from whitelisted peers, even if not requested,
         // unless we're still syncing with the network. Such an unrequested
         // block may still be processed, subject to the conditions in
         // AcceptBlock().
         bool forceProcessing =
             pfrom.HasPermission(NetPermissionFlags::NoBan) &&
             !m_chainman.ActiveChainstate().IsInitialBlockDownload();
         const BlockHash hash = pblock->GetHash();
         {
             LOCK(cs_main);
             // Always process the block if we requested it, since we may
             // need it even when it's not a candidate for a new best tip.
             forceProcessing |= IsBlockRequested(hash);
             RemoveBlockRequest(hash);
             // mapBlockSource is only used for punishing peers and setting
             // which peers send us compact blocks, so the race between here and
             // cs_main in ProcessNewBlock is fine.
             mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
         }
         ProcessBlock(config, pfrom, pblock, forceProcessing);
         return;
     }
 
     if (msg_type == NetMsgType::AVAHELLO) {
         {
             LOCK(pfrom.cs_avalanche_pubkey);
             if (pfrom.m_avalanche_pubkey.has_value()) {
                 LogPrint(
                     BCLog::AVALANCHE,
                     "Ignoring avahello from peer %d: already in our node set\n",
                     pfrom.GetId());
                 return;
             }
 
             avalanche::Delegation delegation;
             vRecv >> delegation;
 
             // A delegation with an all zero limited id indicates that the peer
             // has no proof, so we're done.
             if (delegation.getLimitedProofId() != uint256::ZERO) {
                 avalanche::DelegationState state;
                 CPubKey pubkey;
                 if (!delegation.verify(state, pubkey)) {
                     Misbehaving(pfrom, 100, "invalid-delegation");
                     return;
                 }
                 pfrom.m_avalanche_pubkey = std::move(pubkey);
 
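                 // The signature below commits to the delegation id and to
                 // both sides' connection nonces and extra entropy, so a
                 // valid avahello cannot be replayed on another connection.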
                 CHashWriter sighasher(SER_GETHASH, 0);
                 sighasher << delegation.getId();
                 sighasher << pfrom.nRemoteHostNonce;
                 sighasher << pfrom.GetLocalNonce();
                 sighasher << pfrom.nRemoteExtraEntropy;
                 sighasher << pfrom.GetLocalExtraEntropy();
 
                 SchnorrSig sig;
                 vRecv >> sig;
                 if (!(*pfrom.m_avalanche_pubkey)
                          .VerifySchnorr(sighasher.GetHash(), sig)) {
                     Misbehaving(pfrom, 100, "invalid-avahello-signature");
                     return;
                 }
 
                 // If we don't know this proof already, add it to the tracker so
                 // it can be requested.
                 const avalanche::ProofId proofid(delegation.getProofId());
                 if (!AlreadyHaveProof(proofid)) {
                     const bool preferred = isPreferredDownloadPeer(pfrom);
                     LOCK(cs_proofrequest);
                     AddProofAnnouncement(pfrom, proofid,
                                          GetTime<std::chrono::microseconds>(),
                                          preferred);
                 }
 
                 // Don't check the return value. If it fails we probably don't
                 // know about the proof yet.
                 g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
                     return pm.addNode(pfrom.GetId(), proofid);
                 });
             }
 
             pfrom.m_avalanche_enabled = true;
         }
 
         // Send getavaaddr and getavaproofs to our avalanche outbound or
         // manual connections
         if (!pfrom.IsInboundConn()) {
             m_connman.PushMessage(&pfrom,
                                   msgMaker.Make(NetMsgType::GETAVAADDR));
             WITH_LOCK(peer->m_addr_token_bucket_mutex,
                       peer->m_addr_token_bucket += GetMaxAddrToSend());
 
             if (peer->m_proof_relay &&
                 !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                 m_connman.PushMessage(&pfrom,
                                       msgMaker.Make(NetMsgType::GETAVAPROOFS));
                 peer->m_proof_relay->compactproofs_requested = true;
             }
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::AVAPOLL) {
         const auto now = Now<SteadyMilliseconds>();
         const int64_t cooldown =
             gArgs.GetIntArg("-avacooldown", AVALANCHE_DEFAULT_COOLDOWN);
 
         const auto last_poll = pfrom.m_last_poll;
         pfrom.m_last_poll = now;
 
         if (now < last_poll + std::chrono::milliseconds(cooldown)) {
             LogPrint(BCLog::AVALANCHE,
                      "Ignoring repeated avapoll from peer %d: cooldown not "
                      "elapsed\n",
                      pfrom.GetId());
             return;
         }
 
         const bool quorum_established =
             g_avalanche && g_avalanche->isQuorumEstablished();
 
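         // The poll round is echoed back in our response so the peer can
         // match the response to its request.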
         uint64_t round;
         Unserialize(vRecv, round);
 
         unsigned int nCount = ReadCompactSize(vRecv);
         if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
             Misbehaving(
                 pfrom, 20,
                 strprintf("too-many-ava-poll: poll message size = %u", nCount));
             return;
         }
 
         std::vector<avalanche::Vote> votes;
         votes.reserve(nCount);
 
         for (unsigned int n = 0; n < nCount; n++) {
             CInv inv;
             vRecv >> inv;
 
             // Default vote for unknown inv type
             uint32_t vote = -1;
 
             // We don't vote definitively until we have an established quorum
             if (!quorum_established) {
                 votes.emplace_back(vote, inv.hash);
                 continue;
             }
 
             // If inv's type is known, get a vote for its hash
             switch (inv.type) {
                 case MSG_TX: {
                     if (gArgs.GetBoolArg("-avalanchepreconsensus", false)) {
                         vote = WITH_LOCK(cs_main, return GetAvalancheVoteForTx(
                                                       TxId(inv.hash)));
                     }
                 } break;
                 case MSG_BLOCK: {
                     vote = WITH_LOCK(cs_main, return GetAvalancheVoteForBlock(
                                                   BlockHash(inv.hash)));
                 } break;
                 case MSG_AVA_PROOF: {
                     vote =
                         getAvalancheVoteForProof(avalanche::ProofId(inv.hash));
                 } break;
                 default: {
                     LogPrint(BCLog::AVALANCHE,
                              "poll inv type %d unknown from peer=%d\n",
                              inv.type, pfrom.GetId());
                 }
             }
 
             votes.emplace_back(vote, inv.hash);
         }
 
         // Send the query to the node.
         g_avalanche->sendResponse(
             &pfrom, avalanche::Response(round, cooldown, std::move(votes)));
         return;
     }
 
     if (msg_type == NetMsgType::AVARESPONSE) {
         // As long as QUIC is not implemented, we need to sign responses and
         // verify their signatures in order to avoid any manipulation of
         // messages at the transport level.
         CHashVerifier<CDataStream> verifier(&vRecv);
         avalanche::Response response;
         verifier >> response;
 
         SchnorrSig sig;
         vRecv >> sig;
 
         {
             LOCK(pfrom.cs_avalanche_pubkey);
             if (!pfrom.m_avalanche_pubkey.has_value() ||
                 !(*pfrom.m_avalanche_pubkey)
                      .VerifySchnorr(verifier.GetHash(), sig)) {
                 Misbehaving(pfrom, 100, "invalid-ava-response-signature");
                 return;
             }
         }
 
         auto now = GetTime<std::chrono::seconds>();
 
         std::vector<avalanche::VoteItemUpdate> updates;
         int banscore{0};
         std::string error;
         if (!g_avalanche->registerVotes(pfrom.GetId(), response, updates,
                                         banscore, error)) {
             if (banscore > 0) {
                 // If the banscore was set, just increase the node ban score
                 Misbehaving(pfrom, banscore, error);
                 return;
             }
 
             // Otherwise the node may have got a network issue. Increase the
             // fault counter instead and only ban if we reached a threshold.
             // This allows for fault tolerance should there be a temporary
             // outage while still preventing DoS'ing behaviors, as the counter
             // is reset if no fault occurred over some time period.
             pfrom.m_avalanche_message_fault_counter++;
             pfrom.m_avalanche_last_message_fault = now;
 
             // Allow up to 12 messages before increasing the ban score. Since
             // the queries are cleared after 10s, this is at least 2 minutes
             // of network outage tolerance over the 1h window.
             if (pfrom.m_avalanche_message_fault_counter > 12) {
                 Misbehaving(pfrom, 2, error);
                 return;
             }
         }
 
         // If no fault occurred within the last hour, reset the fault counter
         if (now > (pfrom.m_avalanche_last_message_fault.load() + 1h)) {
             pfrom.m_avalanche_message_fault_counter = 0;
         }
 
         pfrom.invsVoted(response.GetVotes().size());
 
         auto logVoteUpdate = [](const auto &voteUpdate,
                                 const std::string &voteItemTypeStr,
                                 const auto &voteItemId) {
             std::string voteOutcome;
             switch (voteUpdate.getStatus()) {
                 case avalanche::VoteStatus::Invalid:
                     voteOutcome = "invalidated";
                     break;
                 case avalanche::VoteStatus::Rejected:
                     voteOutcome = "rejected";
                     break;
                 case avalanche::VoteStatus::Accepted:
                     voteOutcome = "accepted";
                     break;
                 case avalanche::VoteStatus::Finalized:
                     voteOutcome = "finalized";
                     break;
                 case avalanche::VoteStatus::Stale:
                     voteOutcome = "stalled";
                     break;
 
                     // No default case, so the compiler can warn about missing
                     // cases
             }
 
             LogPrint(BCLog::AVALANCHE, "Avalanche %s %s %s\n", voteOutcome,
                      voteItemTypeStr, voteItemId.ToString());
         };
 
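         // Set when any block vote update is processed below, so that we only
         // re-evaluate the best chain once after handling all the updates.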
         bool shouldActivateBestChain = false;
 
         for (const auto &u : updates) {
             const avalanche::AnyVoteItem &item = u.getVoteItem();
 
             // Don't use a visitor here as we want to ignore unsupported item
             // types. This comes in handy when adding new types.
             if (auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
                 avalanche::ProofRef proof = *pitem;
                 const avalanche::ProofId &proofid = proof->getId();
 
                 logVoteUpdate(u, "proof", proofid);
 
                 auto rejectionMode =
                     avalanche::PeerManager::RejectionMode::DEFAULT;
                 auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
                 switch (u.getStatus()) {
                     case avalanche::VoteStatus::Invalid:
                         g_avalanche->withPeerManager(
                             [&](avalanche::PeerManager &pm) {
                                 pm.setInvalid(proofid);
                             });
                         // Fallthrough
                     case avalanche::VoteStatus::Stale:
                         // Invalidate mode removes the proof from all proof
                         // pools
                         rejectionMode =
                             avalanche::PeerManager::RejectionMode::INVALIDATE;
                         // Fallthrough
                     case avalanche::VoteStatus::Rejected:
                         if (!g_avalanche->withPeerManager(
                                 [&](avalanche::PeerManager &pm) {
                                     return pm.rejectProof(proofid,
                                                           rejectionMode);
                                 })) {
                             LogPrint(BCLog::AVALANCHE,
                                      "ERROR: Failed to reject proof: %s\n",
                                      proofid.GetHex());
                         }
                         break;
                     case avalanche::VoteStatus::Finalized:
                         nextCooldownTimePoint +=
                             std::chrono::seconds(gArgs.GetIntArg(
                                 "-avalanchepeerreplacementcooldown",
                                 AVALANCHE_DEFAULT_PEER_REPLACEMENT_COOLDOWN));
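                         // Fallthrough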
                     case avalanche::VoteStatus::Accepted:
                         if (!g_avalanche->withPeerManager(
                                 [&](avalanche::PeerManager &pm) {
                                     pm.registerProof(
                                         proof,
                                         avalanche::PeerManager::
                                             RegistrationMode::FORCE_ACCEPT);
                                     return pm.forPeer(
                                         proofid,
                                         [&](const avalanche::Peer &peer) {
                                             pm.updateNextPossibleConflictTime(
                                                 peer.peerid,
                                                 nextCooldownTimePoint);
                                             if (u.getStatus() ==
                                                 avalanche::VoteStatus::
                                                     Finalized) {
                                                 pm.setFinalized(peer.peerid);
                                             }
                                             // Only fail if the peer was not
                                             // created
                                             return true;
                                         });
                                 })) {
                             LogPrint(BCLog::AVALANCHE,
                                      "ERROR: Failed to accept proof: %s\n",
                                      proofid.GetHex());
                         }
                         break;
                 }
             }
 
             if (auto pitem = std::get_if<const CBlockIndex *>(&item)) {
                 CBlockIndex *pindex = const_cast<CBlockIndex *>(*pitem);
 
                 shouldActivateBestChain = true;
 
                 logVoteUpdate(u, "block", pindex->GetBlockHash());
 
                 switch (u.getStatus()) {
                     case avalanche::VoteStatus::Invalid:
                     case avalanche::VoteStatus::Rejected: {
                         BlockValidationState state;
                         m_chainman.ActiveChainstate().ParkBlock(config, state,
                                                                 pindex);
                         if (!state.IsValid()) {
                             LogPrintf("ERROR: Database error: %s\n",
                                       state.GetRejectReason());
                             return;
                         }
                     } break;
                     case avalanche::VoteStatus::Accepted: {
                         LOCK(cs_main);
                         m_chainman.ActiveChainstate().UnparkBlock(pindex);
                     } break;
                     case avalanche::VoteStatus::Finalized: {
                         {
                             LOCK(cs_main);
                             m_chainman.ActiveChainstate().UnparkBlock(pindex);
                         }
                         m_chainman.ActiveChainstate().AvalancheFinalizeBlock(
                             pindex);
                     } break;
                     case avalanche::VoteStatus::Stale:
                         // Fall back on Nakamoto consensus in the absence of
                         // Avalanche votes for other competing or descendant
                         // blocks.
                         break;
                 }
             }
         }
 
         if (shouldActivateBestChain) {
             BlockValidationState state;
             if (!m_chainman.ActiveChainstate().ActivateBestChain(config,
                                                                  state)) {
                 LogPrintf("failed to activate chain (%s)\n", state.ToString());
             }
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::AVAPROOF) {
         auto proof = RCUPtr<avalanche::Proof>::make();
         vRecv >> *proof;
 
         ReceivedAvalancheProof(pfrom, *peer, proof);
 
         return;
     }
 
     if (msg_type == NetMsgType::GETAVAPROOFS) {
         if (peer->m_proof_relay == nullptr) {
             return;
         }
 
         peer->m_proof_relay->lastSharedProofsUpdate =
             GetTime<std::chrono::seconds>();
 
         peer->m_proof_relay->sharedProofs =
             g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
                 return pm.getShareableProofsSnapshot();
             });
 
         avalanche::CompactProofs compactProofs(
             peer->m_proof_relay->sharedProofs);
         m_connman.PushMessage(
             &pfrom, msgMaker.Make(NetMsgType::AVAPROOFS, compactProofs));
 
         return;
     }
 
     if (msg_type == NetMsgType::AVAPROOFS) {
         if (peer->m_proof_relay == nullptr) {
             return;
         }
 
         // Only process the compact proofs if we requested them
         if (!peer->m_proof_relay->compactproofs_requested) {
             LogPrint(BCLog::AVALANCHE, "Ignoring unsolicited avaproofs\n");
             return;
         }
         peer->m_proof_relay->compactproofs_requested = false;
 
         avalanche::CompactProofs compactProofs;
         try {
             vRecv >> compactProofs;
         } catch (std::ios_base::failure &e) {
             // These compact proofs have non-contiguous or overflowing indexes
             Misbehaving(pfrom, 100, "avaproofs-bad-indexes");
             return;
         }
 
         // If there are prefilled proofs, process them first
         std::set<uint32_t> prefilledIndexes;
         for (const auto &prefilledProof : compactProofs.getPrefilledProofs()) {
             if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
                 // If we got an invalid proof, the peer is getting banned and we
                 // can bail out.
                 return;
             }
         }
 
         // If there are no shortids, avoid parsing/responding/accounting for
         // the message.
         if (compactProofs.getShortIDs().size() == 0) {
             LogPrint(BCLog::AVALANCHE,
                      "Got an avaproofs message with no shortid (peer %d)\n",
                      pfrom.GetId());
             return;
         }
 
         // To determine the chance that the number of entries in a bucket
         // exceeds N, we use the fact that the number of elements in a single
         // bucket is binomially distributed (with n = the number of shortids
         // S, and p = 1 / the number of buckets), that in the worst case the
         // number of buckets is equal to S (due to std::unordered_map having a
         // default load factor of 1.0), and that the chance for any bucket to
         // exceed N elements is at most buckets * (the chance that any given
         // bucket is above N elements). Thus:
         //   P(max_elements_per_bucket > N) <=
         //     S * (1 - cdf(binomial(n=S,p=1/S), N))
         // If we assume up to 21000000 proofs, allowing 15 elements per
         // bucket should only fail once per ~2.5 million avaproofs transfers
         // (per peer and connection).
         // TODO re-evaluate the bucket count to a more realistic value.
         // TODO: In the case of a shortid-collision, we should request all the
         // proofs which collided. For now, we only request one, which is not
         // that bad considering this event is expected to be very rare.
         auto shortIdProcessor =
             avalanche::ProofShortIdProcessor(compactProofs.getPrefilledProofs(),
                                              compactProofs.getShortIDs(), 15);
 
         if (shortIdProcessor.hasOutOfBoundIndex()) {
             // This should be caught by deserialization, but check it here as
             // well as an extra safety measure.
             Misbehaving(pfrom, 100, "avaproofs-bad-indexes");
             return;
         }
         if (!shortIdProcessor.isEvenlyDistributed()) {
             // This is suspicious, don't ban but bail out
             return;
         }
 
         size_t proofCount = 0;
         std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
         g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
             pm.forEachPeer([&](const avalanche::Peer &peer) {
                 assert(peer.proof);
                 uint64_t shortid = compactProofs.getShortID(peer.getProofId());
 
                 int added =
                     shortIdProcessor.matchKnownItem(shortid, peer.proof);
 
                 // No collision
                 if (added >= 0) {
                     // Because we know the proof, we can determine if our peer
                     // has it (added = 1) or not (added = 0) and update the
                     // remote proof status accordingly.
                     remoteProofsStatus.emplace_back(peer.getProofId(),
                                                     added > 0);
                 }
 
                 proofCount += added;
 
                 // In order to properly determine which proof is missing, we
                 // need to keep scanning for all our proofs.
                 return true;
             });
         });
 
         avalanche::ProofsRequest req;
         for (size_t i = 0; i < compactProofs.size(); i++) {
             if (shortIdProcessor.getItem(i) == nullptr) {
                 req.indices.push_back(i);
             }
         }
 
         m_connman.PushMessage(&pfrom,
                               msgMaker.Make(NetMsgType::AVAPROOFSREQ, req));
 
         const NodeId nodeid = pfrom.GetId();
 
         // We want to keep a count of how many nodes we successfully requested
         // avaproofs from as this is used to determine when we are confident our
         // quorum is close enough to the other participants.
         g_avalanche->avaproofsSent(nodeid);
 
         if (pfrom.IsAvalancheOutboundConnection() || pfrom.IsManualConn()) {
             g_avalanche->withPeerManager(
                 [&remoteProofsStatus, nodeid](avalanche::PeerManager &pm) {
                     for (const auto &[proofid, present] : remoteProofsStatus) {
                         pm.saveRemoteProof(proofid, nodeid, present);
                     }
                 });
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::AVAPROOFSREQ) {
         if (peer->m_proof_relay == nullptr) {
             return;
         }
 
         avalanche::ProofsRequest proofreq;
         vRecv >> proofreq;
 
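         // Walk the shared proofs snapshot once, in leaf order, and send each
         // proof whose position matches the next requested index. The indices
         // are expected to be sorted in ascending order.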
         auto requestedIndiceIt = proofreq.indices.begin();
         uint32_t treeIndice = 0;
         peer->m_proof_relay->sharedProofs.forEachLeaf([&](const auto &proof) {
             if (requestedIndiceIt == proofreq.indices.end()) {
                 // No more indices to process
                 return false;
             }
 
             if (treeIndice++ == *requestedIndiceIt) {
                 m_connman.PushMessage(
                     &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
                 requestedIndiceIt++;
             }
 
             return true;
         });
 
         peer->m_proof_relay->sharedProofs = {};
         return;
     }
 
     if (msg_type == NetMsgType::GETADDR) {
         // This asymmetric behavior for inbound and outbound connections was
         // introduced to prevent a fingerprinting attack: an attacker can send
         // specific fake addresses to users' AddrMan and later request them by
         // sending getaddr messages. Making nodes which are behind NAT and can
         // only make outgoing connections ignore the getaddr message mitigates
         // the attack.
         if (!pfrom.IsInboundConn()) {
             LogPrint(BCLog::NET,
                      "Ignoring \"getaddr\" from %s connection. peer=%d\n",
                      pfrom.ConnectionTypeAsString(), pfrom.GetId());
             return;
         }
 
         // Since this must be an inbound connection, SetupAddressRelay will
         // never fail.
         Assume(SetupAddressRelay(pfrom, *peer));
 
         // Only send one GetAddr response per connection to reduce resource
         // waste and discourage addr stamping of INV announcements.
         if (peer->m_getaddr_recvd) {
             LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
                      pfrom.GetId());
             return;
         }
         peer->m_getaddr_recvd = true;
 
         peer->m_addrs_to_send.clear();
         std::vector<CAddress> vAddr;
         const size_t maxAddrToSend = GetMaxAddrToSend();
         if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
             vAddr = m_connman.GetAddresses(maxAddrToSend, MAX_PCT_ADDR_TO_SEND,
                                            /* network */ std::nullopt);
         } else {
             vAddr = m_connman.GetAddresses(pfrom, maxAddrToSend,
                                            MAX_PCT_ADDR_TO_SEND);
         }
         FastRandomContext insecure_rand;
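         // The selected addresses are queued in m_addrs_to_send and relayed
         // later by the regular address-sending logic.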
         for (const CAddress &addr : vAddr) {
             PushAddress(*peer, addr, insecure_rand);
         }
         return;
     }
 
     if (msg_type == NetMsgType::GETAVAADDR) {
         auto now = GetTime<std::chrono::seconds>();
         if (now < pfrom.m_nextGetAvaAddr) {
             // Prevent a peer from exhausting our resources by spamming
             // getavaaddr messages.
             LogPrint(BCLog::AVALANCHE,
                      "Ignoring repeated getavaaddr from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         // Only accept a getavaaddr every GETAVAADDR_INTERVAL at most
         pfrom.m_nextGetAvaAddr = now + GETAVAADDR_INTERVAL;
 
         if (!SetupAddressRelay(pfrom, *peer)) {
             LogPrint(BCLog::AVALANCHE,
                      "Ignoring getavaaddr message from %s peer=%d\n",
                      pfrom.ConnectionTypeAsString(), pfrom.GetId());
             return;
         }
 
         auto availabilityScoreComparator = [](const CNode *lhs,
                                               const CNode *rhs) {
             double scoreLhs = lhs->getAvailabilityScore();
             double scoreRhs = rhs->getAvailabilityScore();
 
             if (scoreLhs != scoreRhs) {
                 return scoreLhs > scoreRhs;
             }
 
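             // Tie-break on the pointer value so the comparator still defines
             // a strict weak ordering when scores are equal.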
             return lhs < rhs;
         };
 
         // Get up to MAX_ADDR_TO_SEND addresses of the nodes which are the
         // most active in the avalanche network. Account for 0 availability as
         // well so we can send addresses even if we did not start polling yet.
         std::set<const CNode *, decltype(availabilityScoreComparator)> avaNodes(
             availabilityScoreComparator);
         m_connman.ForEachNode([&](const CNode *pnode) {
             if (!pnode->m_avalanche_enabled ||
                 pnode->getAvailabilityScore() < 0.) {
                 return;
             }
 
             avaNodes.insert(pnode);
             if (avaNodes.size() > GetMaxAddrToSend()) {
                 avaNodes.erase(std::prev(avaNodes.end()));
             }
         });
 
         peer->m_addrs_to_send.clear();
         FastRandomContext insecure_rand;
         for (const CNode *pnode : avaNodes) {
             PushAddress(*peer, pnode->addr, insecure_rand);
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::MEMPOOL) {
         if (!(peer->m_our_services & NODE_BLOOM) &&
             !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
             if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
                 LogPrint(BCLog::NET,
                          "mempool request with bloom filters disabled, "
                          "disconnect peer=%d\n",
                          pfrom.GetId());
                 pfrom.fDisconnect = true;
             }
             return;
         }
 
         if (m_connman.OutboundTargetReached(false) &&
             !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
             if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
                 LogPrint(BCLog::NET,
                          "mempool request with bandwidth limit reached, "
                          "disconnect peer=%d\n",
                          pfrom.GetId());
                 pfrom.fDisconnect = true;
             }
             return;
         }
 
         if (auto tx_relay = peer->GetTxRelay()) {
             LOCK(tx_relay->m_tx_inventory_mutex);
             tx_relay->m_send_mempool = true;
         }
         return;
     }
 
     if (msg_type == NetMsgType::PING) {
         if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
             uint64_t nonce = 0;
             vRecv >> nonce;
             // Echo the message back with the nonce. This allows for two useful
             // features:
             //
             // 1) A remote node can quickly check if the connection is
             // operational.
             // 2) Remote nodes can measure the latency of the network thread. If
             // this node is overloaded it won't respond to pings quickly and the
             // remote node can avoid sending us more work, like chain download
             // requests.
             //
             // The nonce stops the remote getting confused between different
             // pings: without it, if the remote node sends a ping once per
             // second and this node takes 5 seconds to respond to each, the 5th
             // ping the remote sends would appear to return very quickly.
             m_connman.PushMessage(&pfrom,
                                   msgMaker.Make(NetMsgType::PONG, nonce));
         }
         return;
     }
 
     if (msg_type == NetMsgType::PONG) {
         const auto ping_end = time_received;
         uint64_t nonce = 0;
         size_t nAvail = vRecv.in_avail();
         bool bPingFinished = false;
         std::string sProblem;
 
         if (nAvail >= sizeof(nonce)) {
             vRecv >> nonce;
 
             // Only process pong message if there is an outstanding ping (old
             // ping without nonce should never pong)
             if (peer->m_ping_nonce_sent != 0) {
                 if (nonce == peer->m_ping_nonce_sent) {
                     // Matching pong received, this ping is no longer
                     // outstanding
                     bPingFinished = true;
                     const auto ping_time = ping_end - peer->m_ping_start.load();
                     if (ping_time.count() >= 0) {
                         // Let connman know about this successful ping-pong
                         pfrom.PongReceived(ping_time);
                     } else {
                         // This should never happen
                         sProblem = "Timing mishap";
                     }
                 } else {
                     // Nonce mismatches are normal when pings are overlapping
                     sProblem = "Nonce mismatch";
                     if (nonce == 0) {
                         // This is most likely a bug in another implementation
                         // somewhere; cancel this ping
                         bPingFinished = true;
                         sProblem = "Nonce zero";
                     }
                 }
             } else {
                 sProblem = "Unsolicited pong without ping";
             }
         } else {
             // This is most likely a bug in another implementation somewhere;
             // cancel this ping
             bPingFinished = true;
             sProblem = "Short payload";
         }
 
         if (!(sProblem.empty())) {
             LogPrint(BCLog::NET,
                      "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                      pfrom.GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
                      nAvail);
         }
         if (bPingFinished) {
             peer->m_ping_nonce_sent = 0;
         }
         return;
     }
 
     if (msg_type == NetMsgType::FILTERLOAD) {
         if (!(peer->m_our_services & NODE_BLOOM)) {
             LogPrint(BCLog::NET,
                      "filterload received despite not offering bloom services "
                      "from peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
         CBloomFilter filter;
         vRecv >> filter;
 
         if (!filter.IsWithinSizeConstraints()) {
             // There is no excuse for sending a too-large filter
             Misbehaving(pfrom, 100, "too-large bloom filter");
         } else if (auto tx_relay = peer->GetTxRelay()) {
             {
                 LOCK(tx_relay->m_bloom_filter_mutex);
                 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
                 tx_relay->m_relay_txs = true;
             }
             pfrom.m_bloom_filter_loaded = true;
         }
         return;
     }
 
     if (msg_type == NetMsgType::FILTERADD) {
         if (!(peer->m_our_services & NODE_BLOOM)) {
             LogPrint(BCLog::NET,
                      "filteradd received despite not offering bloom services "
                      "from peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
         std::vector<uint8_t> vData;
         vRecv >> vData;
 
         // Nodes must NEVER send a data item > 520 bytes (the max size for a
         // script data object, and thus, the maximum size any matched object can
         // have) in a filteradd message.
         bool bad = false;
         if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
             bad = true;
         } else if (auto tx_relay = peer->GetTxRelay()) {
             LOCK(tx_relay->m_bloom_filter_mutex);
             if (tx_relay->m_bloom_filter) {
                 tx_relay->m_bloom_filter->insert(vData);
             } else {
                 bad = true;
             }
         }
         if (bad) {
             // The structure of this code doesn't really allow for a good error
             // code. We'll go generic.
             Misbehaving(pfrom, 100, "bad filteradd message");
         }
         return;
     }
 
     if (msg_type == NetMsgType::FILTERCLEAR) {
         if (!(peer->m_our_services & NODE_BLOOM)) {
             LogPrint(BCLog::NET,
                      "filterclear received despite not offering bloom services "
                      "from peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
         auto tx_relay = peer->GetTxRelay();
         if (!tx_relay) {
             return;
         }
 
         {
             LOCK(tx_relay->m_bloom_filter_mutex);
             tx_relay->m_bloom_filter = nullptr;
             tx_relay->m_relay_txs = true;
         }
         pfrom.m_bloom_filter_loaded = false;
         pfrom.m_relays_txs = true;
         return;
     }
 
     if (msg_type == NetMsgType::FEEFILTER) {
         Amount newFeeFilter = Amount::zero();
         vRecv >> newFeeFilter;
         if (MoneyRange(newFeeFilter)) {
             if (auto tx_relay = peer->GetTxRelay()) {
                 tx_relay->m_fee_filter_received = newFeeFilter;
             }
             LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
                      CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
         }
         return;
     }
 
     if (msg_type == NetMsgType::GETCFILTERS) {
         ProcessGetCFilters(pfrom, *peer, vRecv);
         return;
     }
 
     if (msg_type == NetMsgType::GETCFHEADERS) {
         ProcessGetCFHeaders(pfrom, *peer, vRecv);
         return;
     }
 
     if (msg_type == NetMsgType::GETCFCHECKPT) {
         ProcessGetCFCheckPt(pfrom, *peer, vRecv);
         return;
     }
 
     if (msg_type == NetMsgType::NOTFOUND) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         // A peer might send up to 1 notfound per getdata request, but no more
         if (vInv.size() <= PROOF_REQUEST_PARAMS.max_peer_announcements +
                                TX_REQUEST_PARAMS.max_peer_announcements +
                                MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             for (CInv &inv : vInv) {
                 if (inv.IsMsgTx()) {
                     // If we receive a NOTFOUND message for a tx we requested,
                     // mark the announcement for it as completed in
                     // InvRequestTracker.
                     LOCK(::cs_main);
                     m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
                     continue;
                 }
                 if (inv.IsMsgProof()) {
                     LOCK(cs_proofrequest);
                     m_proofrequest.ReceivedResponse(
                         pfrom.GetId(), avalanche::ProofId(inv.hash));
                 }
             }
         }
         return;
     }
 
     // Ignore unknown commands for extensibility
     LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
              SanitizeString(msg_type), pfrom.GetId());
     return;
 }
 
 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer) {
     {
         LOCK(peer.m_misbehavior_mutex);
 
         // There's nothing to do if the m_should_discourage flag isn't set
         if (!peer.m_should_discourage) {
             return false;
         }
 
         peer.m_should_discourage = false;
     } // peer.m_misbehavior_mutex
 
     if (pnode.HasPermission(NetPermissionFlags::NoBan)) {
         // We never disconnect or discourage peers for bad behavior if they have
         // NetPermissionFlags::NoBan permission
         LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
         return false;
     }
 
     if (pnode.IsManualConn()) {
         // We never disconnect or discourage manual peers for bad behavior
         LogPrintf("Warning: not punishing manually connected peer %d!\n",
                   peer.m_id);
         return false;
     }
 
     if (pnode.addr.IsLocal()) {
         // We disconnect local peers for bad behavior but don't discourage
         // (since that would discourage all peers on the same local address)
         LogPrint(BCLog::NET,
                  "Warning: disconnecting but not discouraging %s peer %d!\n",
                  pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
         pnode.fDisconnect = true;
         return true;
     }
 
     // Normal case: Disconnect the peer and discourage all nodes sharing the
     // address
     LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n",
              peer.m_id);
     if (m_banman) {
         m_banman->Discourage(pnode.addr);
     }
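     // DisconnectNode(addr) drops every connection from this address, not
     // just pnode.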
     m_connman.DisconnectNode(pnode.addr);
     return true;
 }
 
 bool PeerManagerImpl::ProcessMessages(const Config &config, CNode *pfrom,
                                       std::atomic<bool> &interruptMsgProc) {
     //
     // Message format
     //  (4) message start
     //  (12) command
     //  (4) size
     //  (4) checksum
     //  (x) data
     //
     bool fMoreWork = false;
 
     PeerRef peer = GetPeerRef(pfrom->GetId());
     if (peer == nullptr) {
         return false;
     }
 
     {
         LOCK(peer->m_getdata_requests_mutex);
         if (!peer->m_getdata_requests.empty()) {
             ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
         }
     }
 
     {
         LOCK2(cs_main, g_cs_orphans);
         if (!peer->m_orphan_work_set.empty()) {
             ProcessOrphanTx(config, peer->m_orphan_work_set);
         }
     }
 
     if (pfrom->fDisconnect) {
         return false;
     }
 
     // this maintains the order of responses and prevents m_getdata_requests
     // from growing unbounded
     {
         LOCK(peer->m_getdata_requests_mutex);
         if (!peer->m_getdata_requests.empty()) {
             return true;
         }
     }
 
     {
         LOCK(g_cs_orphans);
         if (!peer->m_orphan_work_set.empty()) {
             return true;
         }
     }
 
     // Don't bother if send buffer is too full to respond anyway
     if (pfrom->fPauseSend) {
         return false;
     }
 
     std::list<CNetMessage> msgs;
     {
         LOCK(pfrom->cs_vProcessMsg);
         if (pfrom->vProcessMsg.empty()) {
             return false;
         }
         // Just take one message
         msgs.splice(msgs.begin(), pfrom->vProcessMsg,
                     pfrom->vProcessMsg.begin());
         pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
         pfrom->fPauseRecv =
             pfrom->nProcessQueueSize > m_connman.GetReceiveFloodSize();
         fMoreWork = !pfrom->vProcessMsg.empty();
     }
     CNetMessage &msg(msgs.front());
 
     TRACE6(net, inbound_message, pfrom->GetId(), pfrom->m_addr_name.c_str(),
            pfrom->ConnectionTypeAsString().c_str(), msg.m_type.c_str(),
            msg.m_recv.size(), msg.m_recv.data());
 
     if (gArgs.GetBoolArg("-capturemessages", false)) {
         CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv),
                        /*is_incoming=*/true);
     }
 
     msg.SetVersion(pfrom->GetCommonVersion());
 
     // Check network magic
     if (!msg.m_valid_netmagic) {
         LogPrint(BCLog::NET,
                  "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
                  SanitizeString(msg.m_type), pfrom->GetId());
 
         // Make sure we discourage the peer this came from for some time.
         if (m_banman) {
             m_banman->Discourage(pfrom->addr);
         }
         m_connman.DisconnectNode(pfrom->addr);
 
         pfrom->fDisconnect = true;
         return false;
     }
 
     // Check header
     if (!msg.m_valid_header) {
         LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
                  SanitizeString(msg.m_type), pfrom->GetId());
         return fMoreWork;
     }
 
     // Checksum
     CDataStream &vRecv = msg.m_recv;
     if (!msg.m_valid_checksum) {
         LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n",
                  __func__, SanitizeString(msg.m_type), msg.m_message_size,
                  pfrom->GetId());
         if (m_banman) {
             m_banman->Discourage(pfrom->addr);
         }
         m_connman.DisconnectNode(pfrom->addr);
         return fMoreWork;
     }
 
     try {
         ProcessMessage(config, *pfrom, msg.m_type, vRecv, msg.m_time,
                        interruptMsgProc);
         if (interruptMsgProc) {
             return false;
         }
 
         {
             LOCK(peer->m_getdata_requests_mutex);
             if (!peer->m_getdata_requests.empty()) {
                 fMoreWork = true;
             }
         }
     } catch (const std::exception &e) {
         LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n",
                  __func__, SanitizeString(msg.m_type), msg.m_message_size,
                  e.what(), typeid(e).name());
     } catch (...) {
         LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n",
                  __func__, SanitizeString(msg.m_type), msg.m_message_size);
     }
 
     return fMoreWork;
 }
 
 void PeerManagerImpl::ConsiderEviction(CNode &pto,
                                        std::chrono::seconds time_in_seconds) {
     AssertLockHeld(cs_main);
 
     CNodeState &state = *State(pto.GetId());
     const CNetMsgMaker msgMaker(pto.GetCommonVersion());
 
     if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() &&
         state.fSyncStarted) {
         // This is an outbound peer subject to disconnection if they don't
         // announce a block with as much work as the current tip within
         // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
         // chain has more work than ours, we should sync to it, unless it's
         // invalid, in which case we should find that out and disconnect from
         // them elsewhere).
         if (state.pindexBestKnownBlock != nullptr &&
             state.pindexBestKnownBlock->nChainWork >=
                 m_chainman.ActiveChain().Tip()->nChainWork) {
             if (state.m_chain_sync.m_timeout != 0s) {
                 state.m_chain_sync.m_timeout = 0s;
                 state.m_chain_sync.m_work_header = nullptr;
                 state.m_chain_sync.m_sent_getheaders = false;
             }
         } else if (state.m_chain_sync.m_timeout == 0s ||
                    (state.m_chain_sync.m_work_header != nullptr &&
                     state.pindexBestKnownBlock != nullptr &&
                     state.pindexBestKnownBlock->nChainWork >=
                         state.m_chain_sync.m_work_header->nChainWork)) {
             // Our best block known by this peer is behind our tip, and we're
             // either noticing that for the first time, OR this peer was able to
             // catch up to some earlier point where we checked against our tip.
             // Either way, set a new timeout based on current tip.
             state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
             state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
             state.m_chain_sync.m_sent_getheaders = false;
         } else if (state.m_chain_sync.m_timeout > 0s &&
                    time_in_seconds > state.m_chain_sync.m_timeout) {
             // No evidence yet that our peer has synced to a chain with work
             // equal to that of our tip, when we first detected it was behind.
             // Send a single getheaders message to give the peer a chance to
             // update us.
             if (state.m_chain_sync.m_sent_getheaders) {
                 // They've run out of time to catch up!
                 LogPrintf(
                     "Disconnecting outbound peer %d for old chain, best known "
                     "block = %s\n",
                     pto.GetId(),
                     state.pindexBestKnownBlock != nullptr
                         ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                         : "<none>");
                 pto.fDisconnect = true;
             } else {
                 assert(state.m_chain_sync.m_work_header);
                 LogPrint(
                     BCLog::NET,
                     "sending getheaders to outbound peer=%d to verify chain "
                     "work (current best known block:%s, benchmark blockhash: "
                     "%s)\n",
                     pto.GetId(),
                     state.pindexBestKnownBlock != nullptr
                         ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                         : "<none>",
                     state.m_chain_sync.m_work_header->GetBlockHash()
                         .ToString());
                 m_connman.PushMessage(
                     &pto,
                     msgMaker.Make(NetMsgType::GETHEADERS,
                                   m_chainman.ActiveChain().GetLocator(
                                       state.m_chain_sync.m_work_header->pprev),
                                   uint256()));
                 state.m_chain_sync.m_sent_getheaders = true;
                 constexpr auto HEADERS_RESPONSE_TIME{2min};
                 // Bump the timeout to allow a response, which could clear the
                 // timeout (if the response shows the peer has synced), reset
                 // the timeout (if the peer syncs to the required work but not
                 // to our tip), or result in disconnect (if we advance to the
                 // timeout and pindexBestKnownBlock has not sufficiently
                 // progressed)
                 state.m_chain_sync.m_timeout =
                     time_in_seconds + HEADERS_RESPONSE_TIME;
             }
         }
     }
 }
 
 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
     // If we have any extra block-relay-only peers, disconnect the youngest
     // unless it's given us a block -- in which case, compare with the
     // second-youngest, and out of those two, disconnect the peer who least
     // recently gave us a block.
     // The youngest block-relay-only peer would be the extra peer we connected
     // to temporarily in order to sync our tip; see net.cpp.
     // Note that we use higher nodeid as a measure for most recent connection.
     if (m_connman.GetExtraBlockRelayCount() > 0) {
         std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
             next_youngest_peer{-1, 0};
 
         m_connman.ForEachNode([&](CNode *pnode) {
             if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) {
                 return;
             }
             if (pnode->GetId() > youngest_peer.first) {
                 next_youngest_peer = youngest_peer;
                 youngest_peer.first = pnode->GetId();
                 youngest_peer.second = pnode->m_last_block_time;
             }
         });
 
         NodeId to_disconnect = youngest_peer.first;
         if (youngest_peer.second > next_youngest_peer.second) {
             // Our newest block-relay-only peer gave us a block more recently;
             // disconnect our second youngest.
             to_disconnect = next_youngest_peer.first;
         }
 
         m_connman.ForNode(
             to_disconnect,
             [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
                 AssertLockHeld(::cs_main);
                 // Make sure we're not getting a block right now, and that we've
                 // been connected long enough for this eviction to happen at
                 // all. Note that we only request blocks from a peer if we learn
                 // of a valid headers chain with at least as much work as our
                 // tip.
                 CNodeState *node_state = State(pnode->GetId());
                 if (node_state == nullptr ||
                     (now - pnode->m_connected >= MINIMUM_CONNECT_TIME &&
                      node_state->nBlocksInFlight == 0)) {
                     pnode->fDisconnect = true;
                     LogPrint(BCLog::NET,
                              "disconnecting extra block-relay-only peer=%d "
                              "(last block received at time %d)\n",
                              pnode->GetId(),
                              count_seconds(pnode->m_last_block_time));
                     return true;
                 } else {
                     LogPrint(
                         BCLog::NET,
                         "keeping block-relay-only peer=%d chosen for eviction "
                         "(connect time: %d, blocks_in_flight: %d)\n",
                         pnode->GetId(), count_seconds(pnode->m_connected),
                         node_state->nBlocksInFlight);
                 }
                 return false;
             });
     }
 
     // Check whether we have too many OUTBOUND_FULL_RELAY peers
     if (m_connman.GetExtraFullOutboundCount() <= 0) {
         return;
     }
 
     // If we have more OUTBOUND_FULL_RELAY peers than we target, disconnect one.
     // Pick the OUTBOUND_FULL_RELAY peer that least recently announced us a new
     // block, with ties broken by choosing the more recent connection (higher
     // node id)
     NodeId worst_peer = -1;
     int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
 
     m_connman.ForEachNode([&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(
                               ::cs_main) {
         AssertLockHeld(::cs_main);
 
         // Only consider OUTBOUND_FULL_RELAY peers that are not already marked
         // for disconnection
         if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) {
             return;
         }
         CNodeState *state = State(pnode->GetId());
         if (state == nullptr) {
             // shouldn't be possible, but just in case
             return;
         }
         // Don't evict our protected peers
         if (state->m_chain_sync.m_protect) {
             return;
         }
         if (state->m_last_block_announcement < oldest_block_announcement ||
             (state->m_last_block_announcement == oldest_block_announcement &&
              pnode->GetId() > worst_peer)) {
             worst_peer = pnode->GetId();
             oldest_block_announcement = state->m_last_block_announcement;
         }
     });
 
     if (worst_peer == -1) {
         return;
     }
 
     bool disconnected = m_connman.ForNode(
         worst_peer, [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
             AssertLockHeld(::cs_main);
 
             // Only disconnect a peer that has been connected to us for some
             // reasonable fraction of our check-frequency, to give it time for
             // new information to have arrived. Also don't disconnect any peer
             // we're trying to download a block from.
             CNodeState &state = *State(pnode->GetId());
             if (now - pnode->m_connected > MINIMUM_CONNECT_TIME &&
                 state.nBlocksInFlight == 0) {
                 LogPrint(BCLog::NET,
                          "disconnecting extra outbound peer=%d (last block "
                          "announcement received at time %d)\n",
                          pnode->GetId(), oldest_block_announcement);
                 pnode->fDisconnect = true;
                 return true;
             } else {
                 LogPrint(BCLog::NET,
                          "keeping outbound peer=%d chosen for eviction "
                          "(connect time: %d, blocks_in_flight: %d)\n",
                          pnode->GetId(), count_seconds(pnode->m_connected),
                          state.nBlocksInFlight);
                 return false;
             }
         });
 
     if (disconnected) {
         // If we disconnected an extra peer, that means we successfully
         // connected to at least one peer after the last time we detected a
         // stale tip. Don't try any more extra peers until we next detect a
         // stale tip, to limit the load we put on the network from these extra
         // connections.
         m_connman.SetTryNewOutboundPeer(false);
     }
 }
 
 void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
     LOCK(cs_main);
 
     auto now{GetTime<std::chrono::seconds>()};
 
     EvictExtraOutboundPeers(now);
 
     if (now > m_stale_tip_check_time) {
         // Check whether our tip is stale, and if so, allow using an extra
         // outbound peer.
         if (!fImporting && !fReindex && m_connman.GetNetworkActive() &&
             m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
             LogPrintf("Potential stale tip detected, will try using extra "
                       "outbound peer (last tip update: %d seconds ago)\n",
                       count_seconds(now - m_last_tip_update.load()));
             m_connman.SetTryNewOutboundPeer(true);
         } else if (m_connman.GetTryNewOutboundPeer()) {
             m_connman.SetTryNewOutboundPeer(false);
         }
         m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
     }
 
     if (!m_initial_sync_finished && CanDirectFetch()) {
         m_connman.StartExtraBlockRelayPeers();
         m_initial_sync_finished = true;
     }
 }
 
 void PeerManagerImpl::MaybeSendPing(CNode &node_to, Peer &peer,
                                     std::chrono::microseconds now) {
     if (m_connman.ShouldRunInactivityChecks(
             node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
         peer.m_ping_nonce_sent &&
         now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) {
         // The ping timeout is using mocktime. To disable the check during
         // testing, increase -peertimeout.
         LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n",
                  0.000001 * count_microseconds(now - peer.m_ping_start.load()),
                  peer.m_id);
         node_to.fDisconnect = true;
         return;
     }
 
     const CNetMsgMaker msgMaker(node_to.GetCommonVersion());
     bool pingSend = false;
 
     if (peer.m_ping_queued) {
         // RPC ping request by user
         pingSend = true;
     }
 
     if (peer.m_ping_nonce_sent == 0 &&
         now > peer.m_ping_start.load() + PING_INTERVAL) {
         // Ping automatically sent as a latency probe & keepalive.
         pingSend = true;
     }
 
     if (pingSend) {
         uint64_t nonce;
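         // Never draw a nonce of 0: m_ping_nonce_sent == 0 is used to mean
         // that no ping is currently outstanding.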
         do {
             nonce = GetRand<uint64_t>();
         } while (nonce == 0);
         peer.m_ping_queued = false;
         peer.m_ping_start = now;
         if (node_to.GetCommonVersion() > BIP0031_VERSION) {
             peer.m_ping_nonce_sent = nonce;
             m_connman.PushMessage(&node_to,
                                   msgMaker.Make(NetMsgType::PING, nonce));
         } else {
             // Peer is too old to support ping command with nonce, pong will
             // never arrive.
             peer.m_ping_nonce_sent = 0;
             m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING));
         }
     }
 }
 
 void PeerManagerImpl::MaybeSendAddr(CNode &node, Peer &peer,
                                     std::chrono::microseconds current_time) {
     // Nothing to do for non-address-relay peers
     if (!peer.m_addr_relay_enabled) {
         return;
     }
 
     LOCK(peer.m_addr_send_times_mutex);
     if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
         peer.m_next_local_addr_send < current_time) {
         // If we've sent before, clear the bloom filter for the peer, so
         // that our self-announcement will actually go out. This might
         // be unnecessary if the bloom filter has already rolled over
         // since our last self-announcement, but there is only a small
         // bandwidth cost that we can incur by doing this (which happens
         // once a day on average).
         if (peer.m_next_local_addr_send != 0us) {
             peer.m_addr_known->reset();
         }
         if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
             CAddress local_addr{*local_service, peer.m_our_services,
                                 (uint32_t)GetAdjustedTime()};
             FastRandomContext insecure_rand;
             PushAddress(peer, local_addr, insecure_rand);
         }
         peer.m_next_local_addr_send = GetExponentialRand(
             current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
     }
 
     // We sent an `addr` message to this peer recently. Nothing more to do.
     if (current_time <= peer.m_next_addr_send) {
         return;
     }
 
     peer.m_next_addr_send =
         GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
 
     const size_t max_addr_to_send = GetMaxAddrToSend();
     if (!Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
         // Should be impossible since we always check size before adding to
         // m_addrs_to_send. Recover by trimming the vector.
         peer.m_addrs_to_send.resize(max_addr_to_send);
     }
 
     // Remove addr records that the peer already knows about, and add new
     // addrs to the m_addr_known filter on the same pass.
     auto addr_already_known = [&peer](const CAddress &addr) {
         bool ret = peer.m_addr_known->contains(addr.GetKey());
         if (!ret) {
             peer.m_addr_known->insert(addr.GetKey());
         }
         return ret;
     };
     peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
                                               peer.m_addrs_to_send.end(),
                                               addr_already_known),
                                peer.m_addrs_to_send.end());
 
     // No addr messages to send
     if (peer.m_addrs_to_send.empty()) {
         return;
     }
 
     const char *msg_type;
     int make_flags;
     if (peer.m_wants_addrv2) {
         msg_type = NetMsgType::ADDRV2;
         make_flags = ADDRV2_FORMAT;
     } else {
         msg_type = NetMsgType::ADDR;
         make_flags = 0;
     }
     m_connman.PushMessage(
         &node, CNetMsgMaker(node.GetCommonVersion())
                    .Make(make_flags, msg_type, peer.m_addrs_to_send));
     peer.m_addrs_to_send.clear();
 
     // we only send the big addr message once
     if (peer.m_addrs_to_send.capacity() > 40) {
         peer.m_addrs_to_send.shrink_to_fit();
     }
 }
 
 void PeerManagerImpl::MaybeSendFeefilter(
     CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
     if (m_ignore_incoming_txs) {
         return;
     }
     if (pto.GetCommonVersion() < FEEFILTER_VERSION) {
         return;
     }
     // peers with the forcerelay permission should not filter txs to us
     if (pto.HasPermission(NetPermissionFlags::ForceRelay)) {
         return;
     }
     // Don't send feefilter messages to outbound block-relay-only peers since
     // they should never announce transactions to us, regardless of feefilter
     // state.
     if (pto.IsBlockOnlyConn()) {
         return;
     }
 
     Amount currentFilter =
         m_mempool
             .GetMinFee(
                 gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
                 1000000)
             .GetFeePerK();
     static FeeFilterRounder g_filter_rounder{
         CFeeRate{DEFAULT_MIN_RELAY_TX_FEE_PER_KB}};
 
     if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
         // Received tx-inv messages are discarded when the active
         // chainstate is in IBD, so tell the peer to not send them.
         currentFilter = MAX_MONEY;
     } else {
         static const Amount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
         if (peer.m_fee_filter_sent == MAX_FILTER) {
             // Send the current filter if we sent MAX_FILTER previously
             // and made it out of IBD.
             peer.m_next_send_feefilter = 0us;
         }
     }
     if (current_time > peer.m_next_send_feefilter) {
         Amount filterToSend = g_filter_rounder.round(currentFilter);
         // We always have a fee filter of at least minRelayTxFee
         filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
         if (filterToSend != peer.m_fee_filter_sent) {
             m_connman.PushMessage(
                 &pto, CNetMsgMaker(pto.GetCommonVersion())
                           .Make(NetMsgType::FEEFILTER, filterToSend));
             peer.m_fee_filter_sent = filterToSend;
         }
         peer.m_next_send_feefilter =
             GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
     }
     // If the fee filter has changed substantially and it's still more than
     // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
     // broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
     else if (current_time + MAX_FEEFILTER_CHANGE_DELAY <
                  peer.m_next_send_feefilter &&
              (currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
               currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
         peer.m_next_send_feefilter =
             current_time + GetRandomDuration<std::chrono::microseconds>(
                                MAX_FEEFILTER_CHANGE_DELAY);
     }
 }
 
 namespace {
 class CompareInvMempoolOrder {
     CTxMemPool *mp;
 
 public:
     explicit CompareInvMempoolOrder(CTxMemPool *_mempool) : mp(_mempool) {}
 
     bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
         /**
          * As std::make_heap produces a max-heap, we want the entries which
          * are topologically earlier to sort later.
          */
         return mp->CompareTopologically(*b, *a);
     }
 };
 } // namespace
 
 bool PeerManagerImpl::SetupAddressRelay(const CNode &node, Peer &peer) {
     // We don't participate in addr relay with outbound block-relay-only
     // connections to prevent providing adversaries with the additional
     // information of addr traffic to infer the link.
     if (node.IsBlockOnlyConn()) {
         return false;
     }
 
     if (!peer.m_addr_relay_enabled.exchange(true)) {
         // First addr message we have received from the peer, initialize
         // m_addr_known
         peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
     }
 
     return true;
 }
 
 bool PeerManagerImpl::SendMessages(const Config &config, CNode *pto) {
     PeerRef peer = GetPeerRef(pto->GetId());
     if (!peer) {
         return false;
     }
     const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
 
     // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
     // disconnect misbehaving peers even before the version handshake is
     // complete.
     if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
         return true;
     }
 
     // Don't send anything until the version handshake is complete
     if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
         return true;
     }
 
     // If we get here, the outgoing message serialization version is set and
     // can't change.
     const CNetMsgMaker msgMaker(pto->GetCommonVersion());
 
     const auto current_time{GetTime<std::chrono::microseconds>()};
 
     if (pto->IsAddrFetchConn() &&
         current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
         LogPrint(BCLog::NET,
                  "addrfetch connection timeout; disconnecting peer=%d\n",
                  pto->GetId());
         pto->fDisconnect = true;
         return true;
     }
 
     MaybeSendPing(*pto, *peer, current_time);
 
     // MaybeSendPing may have marked peer for disconnection
     if (pto->fDisconnect) {
         return true;
     }
 
     bool sync_blocks_and_headers_from_peer = false;
 
     MaybeSendAddr(*pto, *peer, current_time);
 
     {
         LOCK(cs_main);
 
         CNodeState &state = *State(pto->GetId());
 
         // Start block sync
         if (m_chainman.m_best_header == nullptr) {
             m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
         }
 
         // Determine whether we might try initial headers sync or parallel
         // block download from this peer -- this mostly affects behavior while
         // in IBD (once out of IBD, we sync from all peers).
         if (state.fPreferredDownload) {
             sync_blocks_and_headers_from_peer = true;
         } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
             // Typically this is an inbound peer. If we don't have any outbound
             // peers, or if we aren't downloading any blocks from such peers,
             // then allow block downloads from this peer, too.
             // We prefer downloading blocks from outbound peers to avoid
             // putting undue load on (say) some home user who is just making
             // outbound connections to the network, but if our only source of
             // the latest blocks is from an inbound peer, we have to be sure to
             // eventually download it (and not just wait indefinitely for an
             // outbound peer to have it).
             if (m_num_preferred_download_peers == 0 ||
                 mapBlocksInFlight.empty()) {
                 sync_blocks_and_headers_from_peer = true;
             }
         }
 
         if (!state.fSyncStarted && CanServeBlocks(*peer) && !fImporting &&
             !fReindex) {
             // Only actively request headers from a single peer, unless we're
             // close to today.
             if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
                 m_chainman.m_best_header->GetBlockTime() >
                     GetAdjustedTime() - 24 * 60 * 60) {
                 state.fSyncStarted = true;
                 state.m_headers_sync_timeout =
                     current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
                     (
                         // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to
                         // microseconds before scaling to maintain precision
                         std::chrono::microseconds{
                             HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
                         (GetAdjustedTime() -
                          m_chainman.m_best_header->GetBlockTime()) /
                         consensusParams.nPowTargetSpacing);
                 nSyncStarted++;
                 const CBlockIndex *pindexStart = m_chainman.m_best_header;
                 /**
                  * If possible, start at the block preceding the currently best
                  * known header. This ensures that we always get a non-empty
                  * list of headers back as long as the peer is up-to-date. With
                  * a non-empty response, we can initialise the peer's known best
                  * block. This wouldn't be possible if we requested starting at
                  * m_best_header and got back an empty response.
                  */
                 if (pindexStart->pprev) {
                     pindexStart = pindexStart->pprev;
                 }
 
                 LogPrint(
                     BCLog::NET,
                     "initial getheaders (%d) to peer=%d (startheight:%d)\n",
                     pindexStart->nHeight, pto->GetId(),
                     peer->m_starting_height);
                 m_connman.PushMessage(
                     pto, msgMaker.Make(
                              NetMsgType::GETHEADERS,
                              m_chainman.ActiveChain().GetLocator(pindexStart),
                              uint256()));
             }
         }
 
         //
         // Try sending block announcements via headers
         //
         {
             // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block
             // hashes we're relaying, and our peer wants headers announcements,
             // then find the first header not yet known to our peer that would
             // connect, and send. If no header would connect, or if we have too
             // many blocks, or if the peer doesn't want headers, just add all to
             // the inv queue.
             LOCK(peer->m_block_inv_mutex);
             std::vector<CBlock> vHeaders;
             bool fRevertToInv =
                 ((!state.fPreferHeaders &&
                   (!state.m_requested_hb_cmpctblocks ||
                    peer->m_blocks_for_headers_relay.size() > 1)) ||
                  peer->m_blocks_for_headers_relay.size() >
                      MAX_BLOCKS_TO_ANNOUNCE);
             // last header queued for delivery
             const CBlockIndex *pBestIndex = nullptr;
             // ensure pindexBestKnownBlock is up-to-date
             ProcessBlockAvailability(pto->GetId());
 
             if (!fRevertToInv) {
                 bool fFoundStartingHeader = false;
                 // Try to find first header that our peer doesn't have, and then
                 // send all headers past that one. If we come across any
                 // headers that aren't on m_chainman.ActiveChain(), give up.
                 for (const BlockHash &hash : peer->m_blocks_for_headers_relay) {
                     const CBlockIndex *pindex =
                         m_chainman.m_blockman.LookupBlockIndex(hash);
                     assert(pindex);
                     if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
                         // Bail out if we reorged away from this block
                         fRevertToInv = true;
                         break;
                     }
                     if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
                         // This means the list of blocks to announce doesn't
                         // connect to each other. This shouldn't really be
                         // possible to hit during regular operation (because
                         // reorgs should take us to a chain that has some block
                         // not on the prior chain, which should be caught by the
                         // prior check), but one way this could happen is by
                         // using invalidateblock / reconsiderblock repeatedly on
                         // the tip, causing it to be added multiple times to
                         // m_blocks_for_headers_relay. Robustly deal with this
                         // rare situation by reverting to an inv.
                         fRevertToInv = true;
                         break;
                     }
                     pBestIndex = pindex;
                     if (fFoundStartingHeader) {
                         // add this to the headers message
                         vHeaders.push_back(pindex->GetBlockHeader());
                     } else if (PeerHasHeader(&state, pindex)) {
                         // Keep looking for the first new block.
                         continue;
                     } else if (pindex->pprev == nullptr ||
                                PeerHasHeader(&state, pindex->pprev)) {
                         // Peer doesn't have this header but they do have the
                         // prior one. Start sending headers.
                         fFoundStartingHeader = true;
                         vHeaders.push_back(pindex->GetBlockHeader());
                     } else {
                         // Peer doesn't have this header or the prior one --
                         // nothing will connect, so bail out.
                         fRevertToInv = true;
                         break;
                     }
                 }
             }
             if (!fRevertToInv && !vHeaders.empty()) {
                 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
                     // We only send up to 1 block as header-and-ids; sending
                     // more would probably mean we're doing an initial-ish sync
                     // or they're slow.
                     LogPrint(BCLog::NET,
                              "%s sending header-and-ids %s to peer=%d\n",
                              __func__, vHeaders.front().GetHash().ToString(),
                              pto->GetId());
 
                     std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
                     {
                         LOCK(m_most_recent_block_mutex);
                         if (m_most_recent_block_hash ==
                             pBestIndex->GetBlockHash()) {
                             cached_cmpctblock_msg =
                                 msgMaker.Make(NetMsgType::CMPCTBLOCK,
                                               *m_most_recent_compact_block);
                         }
                     }
                     if (cached_cmpctblock_msg.has_value()) {
                         m_connman.PushMessage(
                             pto, std::move(cached_cmpctblock_msg.value()));
                     } else {
                         CBlock block;
                         bool ret = ReadBlockFromDisk(block, pBestIndex,
                                                      consensusParams);
                         assert(ret);
                         CBlockHeaderAndShortTxIDs cmpctblock(block);
                         m_connman.PushMessage(
                             pto,
                             msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
                     }
                     state.pindexBestHeaderSent = pBestIndex;
                 } else if (state.fPreferHeaders) {
                     if (vHeaders.size() > 1) {
                         LogPrint(BCLog::NET,
                                  "%s: %u headers, range (%s, %s), to peer=%d\n",
                                  __func__, vHeaders.size(),
                                  vHeaders.front().GetHash().ToString(),
                                  vHeaders.back().GetHash().ToString(),
                                  pto->GetId());
                     } else {
                         LogPrint(BCLog::NET,
                                  "%s: sending header %s to peer=%d\n", __func__,
                                  vHeaders.front().GetHash().ToString(),
                                  pto->GetId());
                     }
                     m_connman.PushMessage(
                         pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
                     state.pindexBestHeaderSent = pBestIndex;
                 } else {
                     fRevertToInv = true;
                 }
             }
             if (fRevertToInv) {
                 // If falling back to using an inv, just try to inv the tip. The
                 // last entry in m_blocks_for_headers_relay was our tip at some
                 // point in the past.
                 if (!peer->m_blocks_for_headers_relay.empty()) {
                     const BlockHash &hashToAnnounce =
                         peer->m_blocks_for_headers_relay.back();
                     const CBlockIndex *pindex =
                         m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
                     assert(pindex);
 
                     // Warn if we're announcing a block that is not on the main
                     // chain. This should be very rare and could be optimized
                     // out. Just log for now.
                     if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
                         LogPrint(
                             BCLog::NET,
                             "Announcing block %s not on main chain (tip=%s)\n",
                             hashToAnnounce.ToString(),
                             m_chainman.ActiveChain()
                                 .Tip()
                                 ->GetBlockHash()
                                 .ToString());
                     }
 
                     // If the peer's chain has this block, don't inv it back.
                     if (!PeerHasHeader(&state, pindex)) {
                         peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
                         LogPrint(BCLog::NET,
                                  "%s: sending inv peer=%d hash=%s\n", __func__,
                                  pto->GetId(), hashToAnnounce.ToString());
                     }
                 }
             }
             peer->m_blocks_for_headers_relay.clear();
         }
     } // release cs_main
 
     //
     // Message: inventory
     //
     std::vector<CInv> vInv;
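     // Queue an inv for relay, flushing a full INV message to the peer
     // whenever MAX_INV_SZ entries have accumulated.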
     auto addInvAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
         vInv.emplace_back(type, hash);
         if (vInv.size() == MAX_INV_SZ) {
             m_connman.PushMessage(
                 pto, msgMaker.Make(NetMsgType::INV, std::move(vInv)));
             vInv.clear();
         }
     };
 
     {
         LOCK(cs_main);
 
         {
             LOCK(peer->m_block_inv_mutex);
 
             vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
                                           INVENTORY_BROADCAST_MAX_PER_MB *
                                               config.GetMaxBlockSize() /
                                               1000000));
 
             // Add blocks
             for (const BlockHash &hash : peer->m_blocks_for_inv_relay) {
                 addInvAndMaybeFlush(MSG_BLOCK, hash);
             }
             peer->m_blocks_for_inv_relay.clear();
         }
 
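         // Returns whether it is time to send invs to this peer (always true
         // for peers with the NoBan permission) and, when the trickle timer
         // has expired, schedules the next send.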
         auto computeNextInvSendTime =
             [&](std::chrono::microseconds &next) -> bool {
             bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
 
             if (next < current_time) {
                 fSendTrickle = true;
                 if (pto->IsInboundConn()) {
                     next = NextInvToInbounds(
                         current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
                 } else {
                     // Skip delay for outbound peers, as there is less privacy
                     // concern for them.
                     next = current_time;
                 }
             }
 
             return fSendTrickle;
         };
 
         // Add proofs to inventory
         if (peer->m_proof_relay != nullptr) {
             LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
 
             if (computeNextInvSendTime(
                     peer->m_proof_relay->m_next_inv_send_time)) {
                 auto it =
                     peer->m_proof_relay->m_proof_inventory_to_send.begin();
                 while (it !=
                        peer->m_proof_relay->m_proof_inventory_to_send.end()) {
                     const avalanche::ProofId proofid = *it;
 
                     it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
                         it);
 
                     if (peer->m_proof_relay->m_proof_inventory_known_filter
                             .contains(proofid)) {
                         continue;
                     }
 
                     peer->m_proof_relay->m_proof_inventory_known_filter.insert(
                         proofid);
                     addInvAndMaybeFlush(MSG_AVA_PROOF, proofid);
                     State(pto->GetId())
                         ->m_recently_announced_proofs.insert(proofid);
                 }
             }
         }
 
         if (auto tx_relay = peer->GetTxRelay()) {
             LOCK(tx_relay->m_tx_inventory_mutex);
             // Check whether periodic sends should happen
             const bool fSendTrickle =
                 computeNextInvSendTime(tx_relay->m_next_inv_send_time);
 
             // Time to send but the peer has requested we not relay
             // transactions.
             if (fSendTrickle) {
                 LOCK(tx_relay->m_bloom_filter_mutex);
                 if (!tx_relay->m_relay_txs) {
                     tx_relay->m_tx_inventory_to_send.clear();
                 }
             }
 
             // Respond to BIP35 mempool requests
             if (fSendTrickle && tx_relay->m_send_mempool) {
                 auto vtxinfo = m_mempool.infoAll();
                 tx_relay->m_send_mempool = false;
                 const CFeeRate filterrate{
                     tx_relay->m_fee_filter_received.load()};
 
                 LOCK(tx_relay->m_bloom_filter_mutex);
 
                 for (const auto &txinfo : vtxinfo) {
                     const TxId &txid = txinfo.tx->GetId();
                     tx_relay->m_tx_inventory_to_send.erase(txid);
                     // Don't send transactions that peers will not put into
                     // their mempool
                     if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
                         continue;
                     }
                     if (tx_relay->m_bloom_filter &&
                         !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
                             *txinfo.tx)) {
                         continue;
                     }
                     tx_relay->m_tx_inventory_known_filter.insert(txid);
                     // Responses to MEMPOOL requests bypass the
                     // m_recently_announced_invs filter.
                     addInvAndMaybeFlush(MSG_TX, txid);
                 }
                 tx_relay->m_last_mempool_req =
                     std::chrono::duration_cast<std::chrono::seconds>(
                         current_time);
             }
 
             // Determine transactions to relay
             if (fSendTrickle) {
                 // Produce a vector with all candidates for sending
                 std::vector<std::set<TxId>::iterator> vInvTx;
                 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
                 for (std::set<TxId>::iterator it =
                          tx_relay->m_tx_inventory_to_send.begin();
                      it != tx_relay->m_tx_inventory_to_send.end(); it++) {
                     vInvTx.push_back(it);
                 }
                 const CFeeRate filterrate{
                     tx_relay->m_fee_filter_received.load()};
                 // Send out the inventory in the order of admission to our
                 // mempool, which is guaranteed to be a topological sort order.
                 // A heap is used so that not all items need sorting if only a
                 // few are being sent.
                 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
                 std::make_heap(vInvTx.begin(), vInvTx.end(),
                                compareInvMempoolOrder);
                 // No reason to drain out at many times the network's
                 // capacity, especially since we have many peers and some
                 // will draw much shorter delays.
                 unsigned int nRelayedTransactions = 0;
                 LOCK(tx_relay->m_bloom_filter_mutex);
                 while (!vInvTx.empty() &&
                        nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB *
                                                   config.GetMaxBlockSize() /
                                                   1000000) {
                     // Fetch the top element from the heap
                     std::pop_heap(vInvTx.begin(), vInvTx.end(),
                                   compareInvMempoolOrder);
                     std::set<TxId>::iterator it = vInvTx.back();
                     vInvTx.pop_back();
                     const TxId txid = *it;
                     // Remove it from the to-be-sent set
                     tx_relay->m_tx_inventory_to_send.erase(it);
                     // Check if not in the filter already
                     if (tx_relay->m_tx_inventory_known_filter.contains(txid)) {
                         continue;
                     }
                     // Not in the mempool anymore? don't bother sending it.
                     auto txinfo = m_mempool.info(txid);
                     if (!txinfo.tx) {
                         continue;
                     }
                     // Peer told us not to send transactions at that feerate?
                     // Don't bother sending it.
                     if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
                         continue;
                     }
                     if (tx_relay->m_bloom_filter &&
                         !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
                             *txinfo.tx)) {
                         continue;
                     }
                     // Send
                     State(pto->GetId())->m_recently_announced_invs.insert(txid);
                     addInvAndMaybeFlush(MSG_TX, txid);
                     nRelayedTransactions++;
                     {
                         // Expire old relay messages
                         while (!g_relay_expiration.empty() &&
                                g_relay_expiration.front().first <
                                    current_time) {
                             mapRelay.erase(g_relay_expiration.front().second);
                             g_relay_expiration.pop_front();
                         }
 
                         auto ret = mapRelay.insert(
                             std::make_pair(txid, std::move(txinfo.tx)));
                         if (ret.second) {
                             g_relay_expiration.push_back(std::make_pair(
                                 current_time + RELAY_TX_CACHE_TIME, ret.first));
                         }
                     }
                     tx_relay->m_tx_inventory_known_filter.insert(txid);
                 }
             }
         }
     } // release cs_main
 
     if (!vInv.empty()) {
         m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
     }
 
     {
         LOCK(cs_main);
 
         CNodeState &state = *State(pto->GetId());
 
         // Detect whether we're stalling
         if (state.m_stalling_since.count() &&
             state.m_stalling_since < current_time - BLOCK_STALLING_TIMEOUT) {
             // Stalling only triggers when the block download window cannot
             // move. During normal steady state, the download window should be
             // much larger than the to-be-downloaded set of blocks, so
             // disconnection should only happen during initial block download.
             LogPrintf("Peer=%d is stalling block download, disconnecting\n",
                       pto->GetId());
             pto->fDisconnect = true;
             return true;
         }
         // In case there is a block that has been in flight from this peer for
         // block_interval * (1 + 0.5 * N) (with N the number of peers from which
         // we're downloading validated blocks), disconnect due to timeout.
         // We compensate for other peers to prevent killing off peers due to our
         // own downstream link being saturated. We only count validated
         // in-flight blocks so peers can't advertise non-existing block hashes
         // to unreasonably increase our timeout.
         if (state.vBlocksInFlight.size() > 0) {
             QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
             int nOtherPeersWithValidatedDownloads =
                 m_peers_downloading_from - 1;
             if (current_time >
                 state.m_downloading_since +
                     std::chrono::seconds{consensusParams.nPowTargetSpacing} *
                         (BLOCK_DOWNLOAD_TIMEOUT_BASE +
                          BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
                              nOtherPeersWithValidatedDownloads)) {
                 LogPrintf("Timeout downloading block %s from peer=%d, "
                           "disconnecting\n",
                           queuedBlock.pindex->GetBlockHash().ToString(),
                           pto->GetId());
                 pto->fDisconnect = true;
                 return true;
             }
         }
 
         // Check for headers sync timeouts
         if (state.fSyncStarted &&
             state.m_headers_sync_timeout < std::chrono::microseconds::max()) {
             // Detect whether this is a stalling initial-headers-sync peer
             if (m_chainman.m_best_header->GetBlockTime() <=
                 GetAdjustedTime() - 24 * 60 * 60) {
                 if (current_time > state.m_headers_sync_timeout &&
                     nSyncStarted == 1 &&
                     (m_num_preferred_download_peers -
                          state.fPreferredDownload >=
                      1)) {
                     // Disconnect a peer (without NetPermissionFlags::NoBan
                     // permission) if it is our only sync peer, and we have
                     // others we could be using instead. Note: If all our peers
                     // are inbound, then we won't disconnect our sync peer for
                     // stalling; we have bigger problems if we can't get any
                     // outbound peers.
                     if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
                         LogPrintf("Timeout downloading headers from peer=%d, "
                                   "disconnecting\n",
                                   pto->GetId());
                         pto->fDisconnect = true;
                         return true;
                     } else {
                         LogPrintf("Timeout downloading headers from noban "
                                   "peer=%d, not disconnecting\n",
                                   pto->GetId());
                         // Reset the headers sync state so that we have a chance
                         // to try downloading from a different peer. Note: this
                         // will also result in at least one more getheaders
                         // message to be sent to this peer (eventually).
                         state.fSyncStarted = false;
                         nSyncStarted--;
                         state.m_headers_sync_timeout = 0us;
                     }
                 }
             } else {
                 // After we've caught up once, reset the timeout so we can't
                 // trigger disconnect later.
                 state.m_headers_sync_timeout = std::chrono::microseconds::max();
             }
         }
 
         // Check that outbound peers have reasonable chains. GetTime() is used
         // by this anti-DoS logic so we can test this using mocktime.
         ConsiderEviction(*pto, GetTime<std::chrono::seconds>());
     } // release cs_main
 
     std::vector<CInv> vGetData;
 
     //
     // Message: getdata (blocks)
     //
     {
         LOCK(cs_main);
 
         CNodeState &state = *State(pto->GetId());
 
         if (CanServeBlocks(*peer) &&
             ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
              !m_chainman.ActiveChainstate().IsInitialBlockDownload()) &&
             state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             std::vector<const CBlockIndex *> vToDownload;
             NodeId staller = -1;
             FindNextBlocksToDownload(pto->GetId(),
                                      MAX_BLOCKS_IN_TRANSIT_PER_PEER -
                                          state.nBlocksInFlight,
                                      vToDownload, staller);
             for (const CBlockIndex *pindex : vToDownload) {
                 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                 BlockRequested(config, pto->GetId(), *pindex);
                 LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
                          pindex->GetBlockHash().ToString(), pindex->nHeight,
                          pto->GetId());
             }
             if (state.nBlocksInFlight == 0 && staller != -1) {
                 if (State(staller)->m_stalling_since == 0us) {
                     State(staller)->m_stalling_since = current_time;
                     LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
                 }
             }
         }
     } // release cs_main
 
     auto addGetDataAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
         CInv inv(type, hash);
         LogPrint(BCLog::NET, "Requesting %s from peer=%d\n", inv.ToString(),
                  pto->GetId());
         vGetData.push_back(std::move(inv));
         if (vGetData.size() >= MAX_GETDATA_SZ) {
             m_connman.PushMessage(
                 pto, msgMaker.Make(NetMsgType::GETDATA, std::move(vGetData)));
             vGetData.clear();
         }
     };
 
     //
     // Message: getdata (proof)
     //
     {
         LOCK(cs_proofrequest);
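         // Log any in-flight proof requests that have timed out, then request
         // the proofs that are now due from this peer.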
         std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
         auto requestable =
             m_proofrequest.GetRequestable(pto->GetId(), current_time, &expired);
         for (const auto &entry : expired) {
             LogPrint(BCLog::AVALANCHE,
                      "timeout of inflight proof %s from peer=%d\n",
                      entry.second.ToString(), entry.first);
         }
         for (const auto &proofid : requestable) {
             if (!AlreadyHaveProof(proofid)) {
                 addGetDataAndMaybeFlush(MSG_AVA_PROOF, proofid);
                 m_proofrequest.RequestedData(
                     pto->GetId(), proofid,
                     current_time + PROOF_REQUEST_PARAMS.getdata_interval);
             } else {
                 // We have already seen this proof, no need to download.
                 // This is just a belt-and-suspenders, as this should
                 // already be called whenever a proof becomes
                 // AlreadyHaveProof().
                 m_proofrequest.ForgetInvId(proofid);
             }
         }
     } // release cs_proofrequest
 
     //
     // Message: getdata (transactions)
     //
     {
         LOCK(cs_main);
         std::vector<std::pair<NodeId, TxId>> expired;
         auto requestable =
             m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
         for (const auto &entry : expired) {
             LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n",
                      entry.second.ToString(), entry.first);
         }
         for (const TxId &txid : requestable) {
             if (!AlreadyHaveTx(txid)) {
                 addGetDataAndMaybeFlush(MSG_TX, txid);
                 m_txrequest.RequestedData(
                     pto->GetId(), txid,
                     current_time + TX_REQUEST_PARAMS.getdata_interval);
             } else {
                 // We have already seen this transaction, no need to download.
                 // This is just a belt-and-suspenders, as this should already be
                 // called whenever a transaction becomes AlreadyHaveTx().
                 m_txrequest.ForgetInvId(txid);
             }
         }
 
         if (!vGetData.empty()) {
             m_connman.PushMessage(pto,
                                   msgMaker.Make(NetMsgType::GETDATA, vGetData));
         }
 
     } // release cs_main
     MaybeSendFeefilter(*pto, *peer, current_time);
     return true;
 }
 
 bool PeerManagerImpl::ReceivedAvalancheProof(CNode &node, Peer &peer,
                                              const avalanche::ProofRef &proof) {
     assert(proof != nullptr);
 
     const avalanche::ProofId &proofid = proof->getId();
 
     AddKnownProof(peer, proofid);
 
     if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
         // We cannot reliably verify proofs during IBD, so bail out early and
         // keep the inventory as pending so it can be requested when the node
         // has synced.
         return true;
     }
 
     const NodeId nodeid = node.GetId();
 
     auto saveProofIfOutbound = [](const CNode &node,
                                   const avalanche::ProofId &proofid,
                                   const NodeId nodeid) -> bool {
         if (node.IsAvalancheOutboundConnection() || node.IsManualConn()) {
             LogPrint(BCLog::AVALANCHE, "Saving remote proof %s\n",
                      proofid.ToString());
             return g_avalanche->withPeerManager(
                 [&](avalanche::PeerManager &pm) {
                     return pm.saveRemoteProof(proofid, nodeid, true);
                 });
         }
 
         return false;
     };
 
     {
         LOCK(cs_proofrequest);
         m_proofrequest.ReceivedResponse(nodeid, proofid);
 
         if (AlreadyHaveProof(proofid)) {
             m_proofrequest.ForgetInvId(proofid);
             saveProofIfOutbound(node, proofid, nodeid);
             return true;
         }
     }
 
     // registerProof should not be called while cs_proofrequest is held,
     // because it locks cs_main and that creates a potential deadlock during
     // shutdown.
 
     avalanche::ProofRegistrationState state;
     if (g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
             return pm.registerProof(proof, state);
         })) {
         WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
         RelayProof(proofid);
 
         node.m_last_proof_time = GetTime<std::chrono::seconds>();
 
         LogPrint(BCLog::NET, "New avalanche proof: peer=%d, proofid %s\n",
                  nodeid, proofid.ToString());
     }
 
     if (state.GetResult() == avalanche::ProofRegistrationResult::INVALID) {
         g_avalanche->withPeerManager(
             [&](avalanche::PeerManager &pm) { pm.setInvalid(proofid); });
         Misbehaving(nodeid, 100, state.GetRejectReason());
         return false;
     }
 
     if (state.GetResult() == avalanche::ProofRegistrationResult::MISSING_UTXO) {
         // It is possible that a proof contains a utxo we don't know about
         // yet, so don't ban for this.
         return false;
     }
 
     if (!g_avalanche->reconcileOrFinalize(proof)) {
         LogPrint(BCLog::AVALANCHE,
                  "Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
                  state.IsValid() ? "not-worth-polling"
                                  : state.GetRejectReason(),
                  nodeid, proofid.ToString());
     }
 
     saveProofIfOutbound(node, proofid, nodeid);
 
     return true;
 }
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index 6a31670dd..3182d3e88 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -1,1373 +1,1375 @@
 // Copyright (c) 2012-2019 The Bitcoin Core developers
 // Copyright (c) 2017-2019 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 #include <net.h>
 
 #include <addrman.h>
 #include <avalanche/avalanche.h>
 #include <avalanche/processor.h>
 #include <avalanche/statistics.h>
 #include <chainparams.h>
 #include <clientversion.h>
 #include <compat.h>
 #include <config.h>
 #include <net_processing.h>
 #include <netaddress.h>
 #include <netbase.h>
 #include <netmessagemaker.h>
 #include <serialize.h>
 #include <span.h>
 #include <streams.h>
 #include <test/util/validation.h>
+#include <threadsafety.h>
 #include <timedata.h>
 #include <util/strencodings.h>
 #include <util/string.h>
 #include <util/translation.h> // for bilingual_str
 #include <version.h>
 
 #include <test/util/setup_common.h>
 
 #include <boost/test/unit_test.hpp>
 
 #include <algorithm>
 #include <chrono>
 #include <cmath>
 #include <condition_variable>
 #include <cstdint>
 #include <functional>
 #include <ios>
 #include <memory>
 #include <string>
 
 using namespace std::literals;
 
 static CNetAddr ip(uint32_t ip) {
     struct in_addr s;
     s.s_addr = ip;
     return CNetAddr(s);
 }
 
 namespace {
 struct CConnmanTest : public CConnman {
     using CConnman::CConnman;
 
     Mutex cs;
     size_t outboundFullRelayCount GUARDED_BY(cs) = 0;
     size_t avalancheOutboundsCount GUARDED_BY(cs) = 0;
 
     std::condition_variable cvar;
 
     NodeId nodeid = 0;
 
     void AddNode(ConnectionType type) {
         CAddress addr(
             CService(ip(GetRand<uint32_t>()), Params().GetDefaultPort()),
             NODE_NONE);
 
         return AddNode(addr, type);
     }
 
     void AddNode(const CAddress &addr, ConnectionType type) {
         CNode *pnode = new CNode(nodeid++, INVALID_SOCKET, addr,
                                  CalculateKeyedNetGroup(addr),
                                  /* nLocalHostNonceIn */ 0,
                                  /* nLocalExtraEntropyIn */ 0, addr,
                                  /* pszDest */ "", type,
                                  /* inbound_onion */ false);
 
         LOCK(m_nodes_mutex);
         m_nodes.push_back(pnode);
         pnode->fSuccessfullyConnected = true;
     }
 
     void ClearNodes() {
         LOCK(m_nodes_mutex);
         for (CNode *node : m_nodes) {
             delete node;
         }
         m_nodes.clear();
     }
 
     void SetMaxOutbounds(int maxFullRelayOutbounds, int maxAvalancheOutbounds) {
         Options options;
         options.nMaxConnections = DEFAULT_MAX_PEER_CONNECTIONS;
         options.m_max_outbound_full_relay = maxFullRelayOutbounds;
         options.m_max_avalanche_outbound = maxAvalancheOutbounds;
         Init(options);
     }
 
     void MakeAddrmanDeterministic() { addrman.MakeDeterministic(); }
 
     void Init(const Options &connOptions) {
         CConnman::Init(connOptions);
 
         if (semOutbound == nullptr) {
             // initialize semaphore
             semOutbound = std::make_unique<CSemaphore>(
                 std::min(m_max_outbound, nMaxConnections));
         }
         if (semAddnode == nullptr) {
             // initialize semaphore
             semAddnode = std::make_unique<CSemaphore>(nMaxAddnode);
         }
     }
 
     void openNetworkConnection(const CAddress &addrConnect,
-                               ConnectionType connType) {
+                               ConnectionType connType)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs) {
         bool newConnection = !AlreadyConnectedToAddress(addrConnect);
         addrman.Attempt(addrConnect, true);
 
         if (newConnection) {
             {
                 LOCK(cs);
 
                 if (connType == ConnectionType::AVALANCHE_OUTBOUND) {
                     avalancheOutboundsCount++;
                 }
                 if (connType == ConnectionType::OUTBOUND_FULL_RELAY) {
                     outboundFullRelayCount++;
                 }
             }
 
             AddNode(addrConnect, connType);
             BOOST_CHECK(AlreadyConnectedToAddress(addrConnect));
             addrman.Connected(addrConnect);
         }
 
         cvar.notify_all();
     }
 
     struct TestAddresses {
         uint32_t group;
         uint32_t services;
         size_t quantity;
     };
 
     bool checkContiguousAddressesConnection(
         const std::vector<TestAddresses> &testAddresses,
         size_t expectedOutboundFullRelayCount,
-        size_t expectedAvalancheOutboundsCount) {
+        size_t expectedAvalancheOutboundsCount) EXCLUSIVE_LOCKS_REQUIRED(!cs) {
         {
             LOCK(cs);
 
             // Reset
             outboundFullRelayCount = 0;
             avalancheOutboundsCount = 0;
         }
 
         addrman.Clear();
         ClearNodes();
 
         struct IpGen {
             uint32_t baseIp;
             uint32_t offset;
         };
         std::vector<IpGen> ipGroups{
             {0x00010101, 1}, {0x00010164, 1}, {0x000101c8, 1}, {0x00010201, 1},
             {0x00010264, 1}, {0x000102c8, 1}, {0x00010301, 1}, {0x00010364, 1},
             {0x000103c8, 1}, {0x00010401, 1}, {0x00010464, 1}, {0x000104c8, 1}};
 
         {
             // Make sure we produce addresses in different groups as expected
             std::set<std::vector<uint8_t>> groups;
             for (auto &[baseIp, _] : ipGroups) {
                 for (uint32_t j = 0; j < 255; j++) {
                     CNetAddr addr = ip(baseIp + (j << 24));
                     groups.insert(addr.GetGroup({}));
                 }
             }
             BOOST_CHECK_EQUAL(groups.size(), ipGroups.size());
         }
 
         // Generate contiguous addresses
         auto getAddrGroup = [&](size_t group, uint64_t services) {
             CNetAddr addr =
                 ip(ipGroups[group].baseIp + (ipGroups[group].offset++ << 24));
             return CAddress(CService(addr, Params().GetDefaultPort()),
                             ServiceFlags(services));
         };
 
         size_t addressCount = 0;
         for (const TestAddresses &addresses : testAddresses) {
             assert(addresses.group < ipGroups.size());
 
             addressCount += addresses.quantity;
             do {
                 addrman.Add({getAddrGroup(addresses.group,
                                           ServiceFlags(addresses.services))},
                             CNetAddr());
             } while (addrman.size() < addressCount);
         }
 
         interruptNet.reset();
         std::vector<std::string> empty;
         threadOpenConnections = std::thread(
             &CConnman::ThreadOpenConnections, this, empty,
             std::bind(&CConnmanTest::openNetworkConnection, this,
                       std::placeholders::_1, std::placeholders::_2));
 
         Mutex mutex;
         WAIT_LOCK(mutex, lock);
         bool ret = cvar.wait_for(lock, 10s, [&]() {
             LOCK(cs);
             return outboundFullRelayCount == expectedOutboundFullRelayCount &&
                    avalancheOutboundsCount == expectedAvalancheOutboundsCount;
         });
 
         interruptNet();
         if (threadOpenConnections.joinable()) {
             threadOpenConnections.join();
         }
 
         // Check each non-avalanche outbound node belongs to a different group
         std::set<std::vector<uint8_t>> groups;
         ForEachNode([&](const CNode *pnode) {
             if (!pnode->IsAvalancheOutboundConnection()) {
                 groups.insert(pnode->addr.GetGroup({}));
             }
         });
         BOOST_CHECK_EQUAL(groups.size(), expectedOutboundFullRelayCount);
 
         return ret;
     }
 };
 } // namespace
 
 class NetTestConfig : public DummyConfig {
 public:
     bool SetMaxBlockSize(uint64_t maxBlockSize) override {
         nMaxBlockSize = maxBlockSize;
         return true;
     }
     uint64_t GetMaxBlockSize() const override { return nMaxBlockSize; }
 
 private:
     uint64_t nMaxBlockSize;
 };
 
 // Use TestingSetup or a daughter class so that m_node.addrman is non-null
 BOOST_FIXTURE_TEST_SUITE(net_tests, RegTestingSetup)
 
 BOOST_AUTO_TEST_CASE(cnode_listen_port) {
     // test default
     uint16_t port{GetListenPort()};
     BOOST_CHECK(port == Params().GetDefaultPort());
     // test set port
     uint16_t altPort = 12345;
     BOOST_CHECK(gArgs.SoftSetArg("-port", ToString(altPort)));
     port = GetListenPort();
     BOOST_CHECK(port == altPort);
 }
 
 BOOST_AUTO_TEST_CASE(cnode_simple_test) {
     SOCKET hSocket = INVALID_SOCKET;
     NodeId id = 0;
 
     in_addr ipv4Addr;
     ipv4Addr.s_addr = 0xa0b0c001;
 
     CAddress addr = CAddress(CService(ipv4Addr, 7777), NODE_NETWORK);
     std::string pszDest;
 
     auto pnode1 =
         std::make_unique<CNode>(id++, hSocket, addr,
                                 /* nKeyedNetGroupIn = */ 0,
                                 /* nLocalHostNonceIn = */ 0,
                                 /* nLocalExtraEntropyIn */ 0, CAddress(),
                                 pszDest, ConnectionType::OUTBOUND_FULL_RELAY,
                                 /* inbound_onion = */ false);
     BOOST_CHECK(pnode1->IsFullOutboundConn() == true);
     BOOST_CHECK(pnode1->IsManualConn() == false);
     BOOST_CHECK(pnode1->IsBlockOnlyConn() == false);
     BOOST_CHECK(pnode1->IsFeelerConn() == false);
     BOOST_CHECK(pnode1->IsAddrFetchConn() == false);
     BOOST_CHECK(pnode1->IsInboundConn() == false);
     BOOST_CHECK(pnode1->m_inbound_onion == false);
     BOOST_CHECK_EQUAL(pnode1->ConnectedThroughNetwork(), Network::NET_IPV4);
 
     auto pnode2 =
         std::make_unique<CNode>(id++, hSocket, addr, 1, 1, 1, CAddress(),
                                 pszDest, ConnectionType::INBOUND, false);
     BOOST_CHECK(pnode2->IsFullOutboundConn() == false);
     BOOST_CHECK(pnode2->IsManualConn() == false);
     BOOST_CHECK(pnode2->IsBlockOnlyConn() == false);
     BOOST_CHECK(pnode2->IsFeelerConn() == false);
     BOOST_CHECK(pnode2->IsAddrFetchConn() == false);
     BOOST_CHECK(pnode2->IsInboundConn() == true);
     BOOST_CHECK(pnode2->m_inbound_onion == false);
     BOOST_CHECK_EQUAL(pnode2->ConnectedThroughNetwork(), Network::NET_IPV4);
 
     auto pnode3 = std::make_unique<CNode>(
         id++, hSocket, addr, 0, 0, 0, CAddress(), pszDest,
         ConnectionType::OUTBOUND_FULL_RELAY, false);
     BOOST_CHECK(pnode3->IsFullOutboundConn() == true);
     BOOST_CHECK(pnode3->IsManualConn() == false);
     BOOST_CHECK(pnode3->IsBlockOnlyConn() == false);
     BOOST_CHECK(pnode3->IsFeelerConn() == false);
     BOOST_CHECK(pnode3->IsAddrFetchConn() == false);
     BOOST_CHECK(pnode3->IsInboundConn() == false);
     BOOST_CHECK(pnode3->m_inbound_onion == false);
     BOOST_CHECK_EQUAL(pnode3->ConnectedThroughNetwork(), Network::NET_IPV4);
 
     auto pnode4 =
         std::make_unique<CNode>(id++, hSocket, addr, 1, 1, 1, CAddress(),
                                 pszDest, ConnectionType::INBOUND, true);
     BOOST_CHECK(pnode4->IsFullOutboundConn() == false);
     BOOST_CHECK(pnode4->IsManualConn() == false);
     BOOST_CHECK(pnode4->IsBlockOnlyConn() == false);
     BOOST_CHECK(pnode4->IsFeelerConn() == false);
     BOOST_CHECK(pnode4->IsAddrFetchConn() == false);
     BOOST_CHECK(pnode4->IsInboundConn() == true);
     BOOST_CHECK(pnode4->m_inbound_onion == true);
     BOOST_CHECK_EQUAL(pnode4->ConnectedThroughNetwork(), Network::NET_ONION);
 }
 
 BOOST_AUTO_TEST_CASE(test_getSubVersionEB) {
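     // getSubVersionEB() renders the excessive block size (given in bytes) as
     // megabytes with a single decimal place. The cases below expect truncation
     // rather than rounding: both 1540000 and 1560000 map to "1.5".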
     BOOST_CHECK_EQUAL(getSubVersionEB(13800000000), "13800.0");
     BOOST_CHECK_EQUAL(getSubVersionEB(3800000000), "3800.0");
     BOOST_CHECK_EQUAL(getSubVersionEB(14000000), "14.0");
     BOOST_CHECK_EQUAL(getSubVersionEB(1540000), "1.5");
     BOOST_CHECK_EQUAL(getSubVersionEB(1560000), "1.5");
     BOOST_CHECK_EQUAL(getSubVersionEB(210000), "0.2");
     BOOST_CHECK_EQUAL(getSubVersionEB(10000), "0.0");
     BOOST_CHECK_EQUAL(getSubVersionEB(0), "0.0");
 }
 
 BOOST_AUTO_TEST_CASE(test_userAgent) {
     NetTestConfig config;
 
     config.SetMaxBlockSize(8000000);
     const std::string uacomment = "A very nice comment";
     gArgs.ForceSetMultiArg("-uacomment", {uacomment});
 
     const std::string versionMessage =
         "/Bitcoin ABC:" + ToString(CLIENT_VERSION_MAJOR) + "." +
         ToString(CLIENT_VERSION_MINOR) + "." +
         ToString(CLIENT_VERSION_REVISION) + "(EB8.0; " + uacomment + ")/";
 
     BOOST_CHECK_EQUAL(userAgent(config), versionMessage);
 }
 
 BOOST_AUTO_TEST_CASE(LimitedAndReachable_Network) {
     BOOST_CHECK_EQUAL(IsReachable(NET_IPV4), true);
     BOOST_CHECK_EQUAL(IsReachable(NET_IPV6), true);
     BOOST_CHECK_EQUAL(IsReachable(NET_ONION), true);
 
     SetReachable(NET_IPV4, false);
     SetReachable(NET_IPV6, false);
     SetReachable(NET_ONION, false);
 
     BOOST_CHECK_EQUAL(IsReachable(NET_IPV4), false);
     BOOST_CHECK_EQUAL(IsReachable(NET_IPV6), false);
     BOOST_CHECK_EQUAL(IsReachable(NET_ONION), false);
 
     SetReachable(NET_IPV4, true);
     SetReachable(NET_IPV6, true);
     SetReachable(NET_ONION, true);
 
     BOOST_CHECK_EQUAL(IsReachable(NET_IPV4), true);
     BOOST_CHECK_EQUAL(IsReachable(NET_IPV6), true);
     BOOST_CHECK_EQUAL(IsReachable(NET_ONION), true);
 }
 
 BOOST_AUTO_TEST_CASE(LimitedAndReachable_NetworkCaseUnroutableAndInternal) {
     BOOST_CHECK_EQUAL(IsReachable(NET_UNROUTABLE), true);
     BOOST_CHECK_EQUAL(IsReachable(NET_INTERNAL), true);
 
     SetReachable(NET_UNROUTABLE, false);
     SetReachable(NET_INTERNAL, false);
 
     // Ignored for both networks
     BOOST_CHECK_EQUAL(IsReachable(NET_UNROUTABLE), true);
     BOOST_CHECK_EQUAL(IsReachable(NET_INTERNAL), true);
 }
 
 CNetAddr UtilBuildAddress(uint8_t p1, uint8_t p2, uint8_t p3, uint8_t p4) {
     uint8_t ip[] = {p1, p2, p3, p4};
 
     struct sockaddr_in sa;
     // initialize the memory block
     memset(&sa, 0, sizeof(sockaddr_in));
     memcpy(&(sa.sin_addr), &ip, sizeof(ip));
     return CNetAddr(sa.sin_addr);
 }
 
 BOOST_AUTO_TEST_CASE(LimitedAndReachable_CNetAddr) {
     // 1.1.1.1
     CNetAddr addr = UtilBuildAddress(0x001, 0x001, 0x001, 0x001);
 
     SetReachable(NET_IPV4, true);
     BOOST_CHECK_EQUAL(IsReachable(addr), true);
 
     SetReachable(NET_IPV4, false);
     BOOST_CHECK_EQUAL(IsReachable(addr), false);
 
     // have to reset this, because this is stateful.
     SetReachable(NET_IPV4, true);
 }
 
 BOOST_AUTO_TEST_CASE(LocalAddress_BasicLifecycle) {
     // 2.1.1.1:1000
     CService addr =
         CService(UtilBuildAddress(0x002, 0x001, 0x001, 0x001), 1000);
 
     SetReachable(NET_IPV4, true);
 
     BOOST_CHECK_EQUAL(IsLocal(addr), false);
     BOOST_CHECK_EQUAL(AddLocal(addr, 1000), true);
     BOOST_CHECK_EQUAL(IsLocal(addr), true);
 
     RemoveLocal(addr);
     BOOST_CHECK_EQUAL(IsLocal(addr), false);
 }
 
 BOOST_AUTO_TEST_CASE(cnetaddr_basic) {
     CNetAddr addr;
 
     // IPv4, INADDR_ANY
     BOOST_REQUIRE(LookupHost("0.0.0.0", addr, false));
     BOOST_REQUIRE(!addr.IsValid());
     BOOST_REQUIRE(addr.IsIPv4());
 
     BOOST_CHECK(addr.IsBindAny());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "0.0.0.0");
 
     // IPv4, INADDR_NONE
     BOOST_REQUIRE(LookupHost("255.255.255.255", addr, false));
     BOOST_REQUIRE(!addr.IsValid());
     BOOST_REQUIRE(addr.IsIPv4());
 
     BOOST_CHECK(!addr.IsBindAny());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "255.255.255.255");
 
     // IPv4, casual
     BOOST_REQUIRE(LookupHost("12.34.56.78", addr, false));
     BOOST_REQUIRE(addr.IsValid());
     BOOST_REQUIRE(addr.IsIPv4());
 
     BOOST_CHECK(!addr.IsBindAny());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "12.34.56.78");
 
     // IPv6, in6addr_any
     BOOST_REQUIRE(LookupHost("::", addr, false));
     BOOST_REQUIRE(!addr.IsValid());
     BOOST_REQUIRE(addr.IsIPv6());
 
     BOOST_CHECK(addr.IsBindAny());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "::");
 
     // IPv6, casual
     BOOST_REQUIRE(
         LookupHost("1122:3344:5566:7788:9900:aabb:ccdd:eeff", addr, false));
     BOOST_REQUIRE(addr.IsValid());
     BOOST_REQUIRE(addr.IsIPv6());
 
     BOOST_CHECK(!addr.IsBindAny());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(),
                       "1122:3344:5566:7788:9900:aabb:ccdd:eeff");
 
     // IPv6, scoped/link-local. See https://tools.ietf.org/html/rfc4007
     // We support non-negative decimal integers (uint32_t) as zone id indices.
     // Normal link-local scoped address functionality is to append "%" plus the
     // zone id, for example, given a link-local address of "fe80::1" and a zone
     // id of "32", return the address as "fe80::1%32".
     const std::string link_local{"fe80::1"};
     const std::string scoped_addr{link_local + "%32"};
     BOOST_REQUIRE(LookupHost(scoped_addr, addr, false));
     BOOST_REQUIRE(addr.IsValid());
     BOOST_REQUIRE(addr.IsIPv6());
     BOOST_CHECK(!addr.IsBindAny());
     BOOST_CHECK_EQUAL(addr.ToString(), scoped_addr);
 
     // Test that the delimiter "%" and default zone id of 0 can be omitted for
     // the default scope.
     BOOST_REQUIRE(LookupHost(link_local + "%0", addr, false));
     BOOST_REQUIRE(addr.IsValid());
     BOOST_REQUIRE(addr.IsIPv6());
     BOOST_CHECK(!addr.IsBindAny());
     BOOST_CHECK_EQUAL(addr.ToString(), link_local);
 
     // TORv2
     BOOST_REQUIRE(addr.SetSpecial("6hzph5hv6337r6p2.onion"));
     BOOST_REQUIRE(addr.IsValid());
     BOOST_REQUIRE(addr.IsTor());
 
     BOOST_CHECK(!addr.IsI2P());
     BOOST_CHECK(!addr.IsBindAny());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "6hzph5hv6337r6p2.onion");
 
     // TORv3
     const char *torv3_addr =
         "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion";
     BOOST_REQUIRE(addr.SetSpecial(torv3_addr));
     BOOST_REQUIRE(addr.IsValid());
     BOOST_REQUIRE(addr.IsTor());
 
     BOOST_CHECK(!addr.IsI2P());
     BOOST_CHECK(!addr.IsBindAny());
     BOOST_CHECK(!addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), torv3_addr);
 
     // TORv3, broken, with wrong checksum
     BOOST_CHECK(!addr.SetSpecial(
         "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscsad.onion"));
 
     // TORv3, broken, with wrong version
     BOOST_CHECK(!addr.SetSpecial(
         "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscrye.onion"));
 
     // TORv3, malicious
     BOOST_CHECK(!addr.SetSpecial(std::string{
         "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd\0wtf.onion",
         66}));
 
     // TOR, bogus length
     BOOST_CHECK(!addr.SetSpecial(std::string{"mfrggzak.onion"}));
 
     // TOR, invalid base32
     BOOST_CHECK(!addr.SetSpecial(std::string{"mf*g zak.onion"}));
 
     // I2P
     const char *i2p_addr =
         "UDHDrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jna.b32.I2P";
     BOOST_REQUIRE(addr.SetSpecial(i2p_addr));
     BOOST_REQUIRE(addr.IsValid());
     BOOST_REQUIRE(addr.IsI2P());
 
     BOOST_CHECK(!addr.IsTor());
     BOOST_CHECK(!addr.IsBindAny());
     BOOST_CHECK(!addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), ToLower(i2p_addr));
 
     // I2P, correct length, but decodes to less than the expected number of
     // bytes.
     BOOST_CHECK(!addr.SetSpecial(
         "udhdrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jn=.b32.i2p"));
 
     // I2P, extra unnecessary padding
     BOOST_CHECK(!addr.SetSpecial(
         "udhdrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v4jna=.b32.i2p"));
 
     // I2P, malicious
     BOOST_CHECK(!addr.SetSpecial(
         "udhdrtrcetjm5sxzskjyr5ztpeszydbh4dpl3pl4utgqqw2v\0wtf.b32.i2p"s));
 
     // I2P, valid but unsupported (56 Base32 characters)
     // See "Encrypted LS with Base 32 Addresses" in
     // https://geti2p.net/spec/encryptedleaseset.txt
     BOOST_CHECK(!addr.SetSpecial(
         "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscsad.b32.i2p"));
 
     // I2P, invalid base32
     BOOST_CHECK(!addr.SetSpecial(std::string{"tp*szydbh4dp.b32.i2p"}));
 
     // Internal
     addr.SetInternal("esffpp");
     // "internal" is considered invalid
     BOOST_REQUIRE(!addr.IsValid());
     BOOST_REQUIRE(addr.IsInternal());
 
     BOOST_CHECK(!addr.IsBindAny());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "esffpvrt3wpeaygy.internal");
 
     // Totally bogus
     BOOST_CHECK(!addr.SetSpecial("totally bogus"));
 }
 
 BOOST_AUTO_TEST_CASE(cnetaddr_serialize_v1) {
     CNetAddr addr;
     CDataStream s(SER_NETWORK, PROTOCOL_VERSION);
 
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "00000000000000000000000000000000");
     s.clear();
 
     BOOST_REQUIRE(LookupHost("1.2.3.4", addr, false));
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "00000000000000000000ffff01020304");
     s.clear();
 
     BOOST_REQUIRE(
         LookupHost("1a1b:2a2b:3a3b:4a4b:5a5b:6a6b:7a7b:8a8b", addr, false));
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "1a1b2a2b3a3b4a4b5a5b6a6b7a7b8a8b");
     s.clear();
 
     BOOST_REQUIRE(addr.SetSpecial("6hzph5hv6337r6p2.onion"));
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "fd87d87eeb43f1f2f3f4f5f6f7f8f9fa");
     s.clear();
 
     BOOST_REQUIRE(addr.SetSpecial(
         "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion"));
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "00000000000000000000000000000000");
     s.clear();
 
     addr.SetInternal("a");
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "fd6b88c08724ca978112ca1bbdcafac2");
     s.clear();
 }
 
 BOOST_AUTO_TEST_CASE(cnetaddr_serialize_v2) {
     CNetAddr addr;
     CDataStream s(SER_NETWORK, PROTOCOL_VERSION);
     // Add ADDRV2_FORMAT to the version so that the CNetAddr
     // serialize method produces an address in v2 format.
     s.SetVersion(s.GetVersion() | ADDRV2_FORMAT);
 
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "021000000000000000000000000000000000");
     s.clear();
 
     BOOST_REQUIRE(LookupHost("1.2.3.4", addr, false));
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "010401020304");
     s.clear();
 
     BOOST_REQUIRE(
         LookupHost("1a1b:2a2b:3a3b:4a4b:5a5b:6a6b:7a7b:8a8b", addr, false));
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "02101a1b2a2b3a3b4a4b5a5b6a6b7a7b8a8b");
     s.clear();
 
     BOOST_REQUIRE(addr.SetSpecial("6hzph5hv6337r6p2.onion"));
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "030af1f2f3f4f5f6f7f8f9fa");
     s.clear();
 
     BOOST_REQUIRE(addr.SetSpecial(
         "kpgvmscirrdqpekbqjsvw5teanhatztpp2gl6eee4zkowvwfxwenqaid.onion"));
     s << addr;
     BOOST_CHECK_EQUAL(
         HexStr(s),
         "042053cd5648488c4707914182655b7664034e09e66f7e8cbf1084e654eb56c5bd88");
     s.clear();
 
     BOOST_REQUIRE(addr.SetInternal("a"));
     s << addr;
     BOOST_CHECK_EQUAL(HexStr(s), "0210fd6b88c08724ca978112ca1bbdcafac2");
     s.clear();
 }
 
 BOOST_AUTO_TEST_CASE(cnetaddr_unserialize_v2) {
     CNetAddr addr;
     CDataStream s(SER_NETWORK, PROTOCOL_VERSION);
     // Add ADDRV2_FORMAT to the version so that the CNetAddr
     // unserialize method expects an address in v2 format.
     s.SetVersion(s.GetVersion() | ADDRV2_FORMAT);
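     // Each vector below is hand-assembled in the addrv2 (BIP155) layout this
     // test exercises: a one-byte network id, a CompactSize-encoded address
     // length, then the raw address bytes. The negative cases check that a
     // length above 512 bytes, or one that does not match the fixed size of a
     // known network, is rejected while leaving the rest of the stream intact.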
 
     // Valid IPv4.
     s << Span{ParseHex("01"          // network type (IPv4)
                        "04"          // address length
                        "01020304")}; // address
     s >> addr;
     BOOST_CHECK(addr.IsValid());
     BOOST_CHECK(addr.IsIPv4());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "1.2.3.4");
     BOOST_REQUIRE(s.empty());
 
     // Invalid IPv4, valid length but address itself is shorter.
     s << Span{ParseHex("01"      // network type (IPv4)
                        "04"      // address length
                        "0102")}; // address
     BOOST_CHECK_EXCEPTION(s >> addr, std::ios_base::failure,
                           HasReason("end of data"));
     BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input.
     s.clear();
 
     // Invalid IPv4, with bogus length.
     s << Span{ParseHex("01"          // network type (IPv4)
                        "05"          // address length
                        "01020304")}; // address
     BOOST_CHECK_EXCEPTION(
         s >> addr, std::ios_base::failure,
         HasReason("BIP155 IPv4 address with length 5 (should be 4)"));
     BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input.
     s.clear();
 
     // Invalid IPv4, with extreme length.
     s << Span{ParseHex("01"          // network type (IPv4)
                        "fd0102"      // address length (513 as CompactSize)
                        "01020304")}; // address
     BOOST_CHECK_EXCEPTION(s >> addr, std::ios_base::failure,
                           HasReason("Address too long: 513 > 512"));
     BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input.
     s.clear();
 
     // Valid IPv6.
     s << Span{ParseHex("02" // network type (IPv6)
                        "10" // address length
                        "0102030405060708090a0b0c0d0e0f10")}; // address
     s >> addr;
     BOOST_CHECK(addr.IsValid());
     BOOST_CHECK(addr.IsIPv6());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "102:304:506:708:90a:b0c:d0e:f10");
     BOOST_REQUIRE(s.empty());
 
     // Valid IPv6, contains embedded "internal".
     s << Span{
         ParseHex("02"                                  // network type (IPv6)
                  "10"                                  // address length
                  "fd6b88c08724ca978112ca1bbdcafac2")}; // address: 0xfd +
                                                        // sha256("bitcoin")[0:5]
                                                        // + sha256(name)[0:10]
     s >> addr;
     BOOST_CHECK(addr.IsInternal());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "zklycewkdo64v6wc.internal");
     BOOST_REQUIRE(s.empty());
 
     // Invalid IPv6, with bogus length.
     s << Span{ParseHex("02"    // network type (IPv6)
                        "04"    // address length
                        "00")}; // address
     BOOST_CHECK_EXCEPTION(
         s >> addr, std::ios_base::failure,
         HasReason("BIP155 IPv6 address with length 4 (should be 16)"));
     BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input.
     s.clear();
 
     // Invalid IPv6, contains embedded IPv4.
     s << Span{ParseHex("02" // network type (IPv6)
                        "10" // address length
                        "00000000000000000000ffff01020304")}; // address
     s >> addr;
     BOOST_CHECK(!addr.IsValid());
     BOOST_REQUIRE(s.empty());
 
     // Invalid IPv6, contains embedded TORv2.
     s << Span{ParseHex("02" // network type (IPv6)
                        "10" // address length
                        "fd87d87eeb430102030405060708090a")}; // address
     s >> addr;
     BOOST_CHECK(!addr.IsValid());
     BOOST_REQUIRE(s.empty());
 
     // Valid TORv2.
     s << Span{ParseHex("03"                      // network type (TORv2)
                        "0a"                      // address length
                        "f1f2f3f4f5f6f7f8f9fa")}; // address
     s >> addr;
     BOOST_CHECK(addr.IsValid());
     BOOST_CHECK(addr.IsTor());
     BOOST_CHECK(addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "6hzph5hv6337r6p2.onion");
     BOOST_REQUIRE(s.empty());
 
     // Invalid TORv2, with bogus length.
     s << Span{ParseHex("03"    // network type (TORv2)
                        "07"    // address length
                        "00")}; // address
     BOOST_CHECK_EXCEPTION(
         s >> addr, std::ios_base::failure,
         HasReason("BIP155 TORv2 address with length 7 (should be 10)"));
     BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input.
     s.clear();
 
     // Valid TORv3.
     s << Span{ParseHex("04" // network type (TORv3)
                        "20" // address length
                        "79bcc625184b05194975c28b66b66b04" // address
                        "69f7f6556fb1ac3189a79b40dda32f1f")};
     s >> addr;
     BOOST_CHECK(addr.IsValid());
     BOOST_CHECK(addr.IsTor());
     BOOST_CHECK(!addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(
         addr.ToString(),
         "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion");
     BOOST_REQUIRE(s.empty());
 
     // Invalid TORv3, with bogus length.
     s << Span{ParseHex("04" // network type (TORv3)
                        "00" // address length
                        "00" // address
                        )};
     BOOST_CHECK_EXCEPTION(
         s >> addr, std::ios_base::failure,
         HasReason("BIP155 TORv3 address with length 0 (should be 32)"));
     BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input.
     s.clear();
 
     // Valid I2P.
     s << Span{ParseHex("05"                               // network type (I2P)
                        "20"                               // address length
                        "a2894dabaec08c0051a481a6dac88b64" // address
                        "f98232ae42d4b6fd2fa81952dfe36a87")};
     s >> addr;
     BOOST_CHECK(addr.IsValid());
     BOOST_CHECK(addr.IsI2P());
     BOOST_CHECK(!addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(
         addr.ToString(),
         "ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p");
     BOOST_REQUIRE(s.empty());
 
     // Invalid I2P, with bogus length.
     s << Span{ParseHex("05" // network type (I2P)
                        "03" // address length
                        "00" // address
                        )};
     BOOST_CHECK_EXCEPTION(
         s >> addr, std::ios_base::failure,
         HasReason("BIP155 I2P address with length 3 (should be 32)"));
     BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input.
     s.clear();
 
     // Valid CJDNS.
     s << Span{ParseHex("06" // network type (CJDNS)
                        "10" // address length
                        "fc000001000200030004000500060007" // address
                        )};
     s >> addr;
     BOOST_CHECK(addr.IsValid());
     BOOST_CHECK(addr.IsCJDNS());
     BOOST_CHECK(!addr.IsAddrV1Compatible());
     BOOST_CHECK_EQUAL(addr.ToString(), "fc00:1:2:3:4:5:6:7");
     BOOST_REQUIRE(s.empty());
 
     // Invalid CJDNS, with bogus length.
     s << Span{ParseHex("06" // network type (CJDNS)
                        "01" // address length
                        "00" // address
                        )};
     BOOST_CHECK_EXCEPTION(
         s >> addr, std::ios_base::failure,
         HasReason("BIP155 CJDNS address with length 1 (should be 16)"));
     BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input.
     s.clear();
 
     // Unknown, with extreme length.
     s << Span{ParseHex("aa"         // network type (unknown)
                        "fe00000002" // address length (CompactSize's MAX_SIZE)
                        "01020304050607" // address
                        )};
 
     BOOST_CHECK_EXCEPTION(s >> addr, std::ios_base::failure,
                           HasReason("Address too long: 33554432 > 512"));
     BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input.
     s.clear();
 
     // Unknown, with reasonable length.
     s << Span{ParseHex("aa"       // network type (unknown)
                        "04"       // address length
                        "01020304" // address
                        )};
     s >> addr;
     BOOST_CHECK(!addr.IsValid());
     BOOST_REQUIRE(s.empty());
 
     // Unknown, with zero length.
     s << Span{ParseHex("aa" // network type (unknown)
                        "00" // address length
                        ""   // address
                        )};
     s >> addr;
     BOOST_CHECK(!addr.IsValid());
     BOOST_REQUIRE(s.empty());
 }
 
 // Prior to PR #14728, this test triggered undefined behavior
 BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) {
     // set up local addresses; all that's necessary to reproduce the bug is
     // that a normal IPv4 address is among the entries, but if this address is
     // !IsRoutable the undefined behavior is easier to trigger deterministically
     in_addr raw_addr;
     raw_addr.s_addr = htonl(0x7f000001);
     const CNetAddr mapLocalHost_entry = CNetAddr(raw_addr);
     {
         LOCK(g_maplocalhost_mutex);
         LocalServiceInfo lsi;
         lsi.nScore = 23;
         lsi.nPort = 42;
         mapLocalHost[mapLocalHost_entry] = lsi;
     }
 
     // create a peer with an IPv4 address
     in_addr ipv4AddrPeer;
     ipv4AddrPeer.s_addr = 0xa0b0c001;
     CAddress addr = CAddress(CService(ipv4AddrPeer, 7777), NODE_NETWORK);
     std::unique_ptr<CNode> pnode = std::make_unique<CNode>(
         0, INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 0,
         /* nLocalHostNonceIn */ 0, /* nLocalExtraEntropyIn */ 0, CAddress{},
         /* pszDest */ std::string{}, ConnectionType::OUTBOUND_FULL_RELAY,
         /* inbound_onion = */ false);
     pnode->fSuccessfullyConnected.store(true);
 
     // the peer claims to be reaching us via IPv6
     in6_addr ipv6AddrLocal;
     memset(ipv6AddrLocal.s6_addr, 0, 16);
     ipv6AddrLocal.s6_addr[0] = 0xcc;
     CAddress addrLocal = CAddress(CService(ipv6AddrLocal, 7777), NODE_NETWORK);
     pnode->SetAddrLocal(addrLocal);
 
     // Before the fix, this caused undefined behavior detectable with clang's
     // -fsanitize=memory
     GetLocalAddrForPeer(*pnode);
 
     // suppress no-checks-run warning; if this test fails, it's by triggering a
     // sanitizer
     BOOST_CHECK(1);
 
     // Cleanup, so that we don't confuse other tests.
     {
         LOCK(g_maplocalhost_mutex);
         mapLocalHost.erase(mapLocalHost_entry);
     }
 }
 
 BOOST_AUTO_TEST_CASE(get_local_addr_for_peer_port) {
     // Test that GetLocalAddrForPeer() properly selects the address to
     // self-advertise:
     //
     // 1. GetLocalAddrForPeer() calls GetLocalAddress(), which returns an
     //    address that is not routable.
     // 2. GetLocalAddrForPeer() overrides the address with whatever the peer
     //    has told us it sees us as.
     //    2.1. For inbound connections we must override both the address and
     //         the port.
     //    2.2. For outbound connections we must override only the address.
 
     // Pretend that we bound to this port.
     const uint16_t bind_port = 20001;
     m_node.args->ForceSetArg("-bind", strprintf("3.4.5.6:%u", bind_port));
 
     // Our address:port as seen from the peer, completely different from the
     // above.
     in_addr peer_us_addr;
     peer_us_addr.s_addr = htonl(0x02030405);
     const CService peer_us{peer_us_addr, 20002};
 
     // Create a peer with a routable IPv4 address (outbound).
     in_addr peer_out_in_addr;
     peer_out_in_addr.s_addr = htonl(0x01020304);
     CNode peer_out{
         /*id=*/0,
         /*hSocketIn=*/INVALID_SOCKET,
         /*addrIn=*/CAddress{CService{peer_out_in_addr, 8333}, NODE_NETWORK},
         /*nKeyedNetGroupIn=*/0,
         /*nLocalHostNonceIn=*/0,
         /*nLocalExtraEntropyIn=*/0,
         /*addrBindIn=*/CAddress{},
         /*addrNameIn=*/std::string{},
         /*conn_type_in=*/ConnectionType::OUTBOUND_FULL_RELAY,
         /*inbound_onion=*/false};
     peer_out.fSuccessfullyConnected = true;
     peer_out.SetAddrLocal(peer_us);
 
     // Without the fix peer_us:8333 is chosen instead of the proper
     // peer_us:bind_port.
     auto chosen_local_addr = GetLocalAddrForPeer(peer_out);
     BOOST_REQUIRE(chosen_local_addr);
     const CService expected{peer_us_addr, bind_port};
     BOOST_CHECK(*chosen_local_addr == expected);
 
     // Create a peer with a routable IPv4 address (inbound).
     in_addr peer_in_in_addr;
     peer_in_in_addr.s_addr = htonl(0x05060708);
     CNode peer_in{
         /*id=*/0,
         /*hSocketIn=*/INVALID_SOCKET,
         /*addrIn=*/CAddress{CService{peer_in_in_addr, 8333}, NODE_NETWORK},
         /*nKeyedNetGroupIn=*/0,
         /*nLocalHostNonceIn=*/0,
         /*nLocalExtraEntropyIn=*/0,
         /*addrBindIn=*/CAddress{},
         /*addrNameIn=*/std::string{},
         /*conn_type_in=*/ConnectionType::INBOUND,
         /*inbound_onion=*/false};
     peer_in.fSuccessfullyConnected = true;
     peer_in.SetAddrLocal(peer_us);
 
     // Without the fix peer_us:8333 is chosen instead of the proper
     // peer_us:peer_us.GetPort().
     chosen_local_addr = GetLocalAddrForPeer(peer_in);
     BOOST_REQUIRE(chosen_local_addr);
     BOOST_CHECK(*chosen_local_addr == peer_us);
 
     m_node.args->ForceSetArg("-bind", "");
 }
 
 BOOST_AUTO_TEST_CASE(avalanche_statistics) {
     const std::vector<std::tuple<uint32_t, uint32_t, double>> testCases = {
         // {step, tau, decay_factor}
         {10, 100, 1. - std::exp(-1. * 10 / 100)},
         // Current defaults
         {AVALANCHE_STATISTICS_REFRESH_PERIOD.count(),
          AVALANCHE_STATISTICS_TIME_CONSTANT.count(),
          AVALANCHE_STATISTICS_DECAY_FACTOR},
     };
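     // The decay factor above follows from treating the availability score as
     // an exponential moving average with time constant tau that is updated
     // every `step` seconds: decayFactor = 1 - e^(-step/tau). Under that
     // (assumed) model, a constant per-period input drives the score toward
     // its target with an error shrinking like e^(-t/tau), which is what the
     // (1 - e^-i) expectations below rely on.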
     for (const auto &[step, tau, decayFactor] : testCases) {
         in_addr ipv4Addr;
         ipv4Addr.s_addr = 0xa0b0c001;
         CAddress addr = CAddress(CService(ipv4Addr, 7777), NODE_NETWORK);
         std::unique_ptr<CNode> pnode = std::make_unique<CNode>(
             0, INVALID_SOCKET, addr, 0, 0, 0, CAddress(), std::string{},
             ConnectionType::OUTBOUND_FULL_RELAY, false);
         pnode->m_avalanche_enabled = true;
 
         double previousScore = pnode->getAvailabilityScore();
         BOOST_CHECK_SMALL(previousScore, 1e-6);
 
         // Check the statistics follow an exponential response for 1 to 10 tau
         for (size_t i = 1; i <= 10; i++) {
             for (uint32_t j = 0; j < tau; j += step) {
                 pnode->invsPolled(1);
                 // Always respond to everything correctly
                 pnode->invsVoted(1);
 
                 pnode->updateAvailabilityScore(decayFactor);
 
                 // Expect a monotonic rise
                 double currentScore = pnode->getAvailabilityScore();
                 BOOST_CHECK_GE(currentScore, previousScore);
                 previousScore = currentScore;
             }
 
             // We expect (1 - e^-i) after i * tau. The tolerance is expressed
             // as a percentage, and we add a (large) 0.1% margin to account for
             // floating point errors.
             BOOST_CHECK_CLOSE(previousScore, -1 * std::expm1(-1. * i),
                               100.1 / tau);
         }
 
         // After 10 tau we should be very close to 100% (about 99.995%)
         BOOST_CHECK_CLOSE(previousScore, 1., 0.01);
 
         for (size_t i = 1; i <= 3; i++) {
             for (uint32_t j = 0; j < tau; j += step) {
                 pnode->invsPolled(2);
 
                 // Stop responding to the polls.
                 pnode->invsVoted(1);
 
                 pnode->updateAvailabilityScore(decayFactor);
 
                 // Expect a monotonic fall
                 double currentScore = pnode->getAvailabilityScore();
                 BOOST_CHECK_LE(currentScore, previousScore);
                 previousScore = currentScore;
             }
 
             // There is a slight error in the expected value because we did not
             // start the decay at exactly 100%, but the 0.1% margin is at least
             // an order of magnitude larger than the expected error so it
             // doesn't matter.
             BOOST_CHECK_CLOSE(previousScore, 1. + std::expm1(-1. * i),
                               100.1 / tau);
         }
 
         // After 3 more tau we should be under 5%
         BOOST_CHECK_LT(previousScore, .05);
 
         for (size_t i = 1; i <= 100; i++) {
             pnode->invsPolled(10);
 
             // Completely stop responding to the polls.
             pnode->invsVoted(0);
 
             pnode->updateAvailabilityScore(decayFactor);
 
             // It's still a monotonic fall, and the score should turn negative.
             double currentScore = pnode->getAvailabilityScore();
             BOOST_CHECK_LE(currentScore, previousScore);
             BOOST_CHECK_LE(currentScore, 0.);
             previousScore = currentScore;
         }
     }
 }
 
 BOOST_AUTO_TEST_CASE(get_extra_full_outbound_count) {
     CConnmanTest connman(GetConfig(), 0x1337, 0x1337, *m_node.addrman);
 
     auto checkExtraFullOutboundCount = [&](size_t fullOutboundCount,
                                            size_t avalancheOutboundCount,
                                            int expectedExtraCount) {
         connman.ClearNodes();
         for (size_t i = 0; i < fullOutboundCount; i++) {
             connman.AddNode(ConnectionType::OUTBOUND_FULL_RELAY);
         }
         for (size_t i = 0; i < avalancheOutboundCount; i++) {
             connman.AddNode(ConnectionType::AVALANCHE_OUTBOUND);
         }
         BOOST_CHECK_EQUAL(connman.GetExtraFullOutboundCount(),
                           expectedExtraCount);
     };
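     // The expected values below are consistent with the extra count being the
     // number of outbound peers (full-relay plus avalanche) in excess of the
     // combined configured maximums. This is an observation about the test
     // vectors, not a statement of the GetExtraFullOutboundCount() contract.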
 
     connman.SetMaxOutbounds(0, 0);
     checkExtraFullOutboundCount(0, 0, 0);
     checkExtraFullOutboundCount(1, 0, 1);
     checkExtraFullOutboundCount(0, 1, 1);
     checkExtraFullOutboundCount(5, 5, 10);
 
     connman.SetMaxOutbounds(4, 0);
     checkExtraFullOutboundCount(0, 0, 0);
     checkExtraFullOutboundCount(1, 0, 0);
     checkExtraFullOutboundCount(0, 1, 0);
     checkExtraFullOutboundCount(4, 0, 0);
     checkExtraFullOutboundCount(0, 4, 0);
     checkExtraFullOutboundCount(2, 2, 0);
     checkExtraFullOutboundCount(5, 5, 6);
 
     connman.SetMaxOutbounds(4, 4);
     checkExtraFullOutboundCount(0, 0, 0);
     checkExtraFullOutboundCount(1, 0, 0);
     checkExtraFullOutboundCount(0, 1, 0);
     checkExtraFullOutboundCount(4, 0, 0);
     checkExtraFullOutboundCount(0, 4, 0);
     checkExtraFullOutboundCount(4, 4, 0);
     checkExtraFullOutboundCount(5, 5, 2);
 }
 
 BOOST_FIXTURE_TEST_CASE(net_group_limit, TestChain100Setup) {
     m_node.connman = std::make_unique<CConnmanTest>(GetConfig(), 0x1337, 0x1337,
                                                     *m_node.addrman);
     m_node.peerman =
         PeerManager::make(*m_node.connman, *m_node.addrman, m_node.banman.get(),
                           *m_node.chainman, *m_node.mempool, false);
 
     bilingual_str error;
     // Init the global avalanche object otherwise the avalanche outbound
     // slots are not allocated.
     g_avalanche = avalanche::Processor::MakeProcessor(
         *m_node.args, *m_node.chain, m_node.connman.get(), *m_node.chainman,
         m_node.mempool.get(), *m_node.scheduler, error);
     BOOST_CHECK(g_avalanche);
 
     CConnman::Options options;
     options.nMaxConnections = 200;
     options.m_max_outbound_full_relay = 8;
     options.m_max_avalanche_outbound = 60;
 
     auto connman = static_cast<CConnmanTest *>(m_node.connman.get());
     connman->MakeAddrmanDeterministic();
     connman->Init(options);
 
     // Single full relay outbound is no problem
     BOOST_CHECK(connman->checkContiguousAddressesConnection(
         {
             // group, services, quantity
             {0, NODE_NETWORK, 1},
         },
         1, // Expected full-relay outbound count
         0  // Expected avalanche outbound count
         ));
 
     // Adding more contiguous full relay outbounds fails due to network group
     // limitation
     BOOST_CHECK(connman->checkContiguousAddressesConnection(
         {
             // group, services, quantity
             {0, NODE_NETWORK, 3},
         },
         1, // Expected full-relay outbound count
         0  // Expected avalanche outbound count
         ));
 
     // Outbounds from different groups can be connected
     BOOST_CHECK(connman->checkContiguousAddressesConnection(
         {
             // group, services, quantity
             {0, NODE_NETWORK, 1},
             {1, NODE_NETWORK, 1},
             {2, NODE_NETWORK, 1},
         },
         3, // Expected full-relay outbound count
         0  // Expected avalanche outbound count
         ));
 
     // Up to the max
     BOOST_CHECK(connman->checkContiguousAddressesConnection(
         {
             // group, services, quantity
             {0, NODE_NETWORK, 1},
             {1, NODE_NETWORK, 1},
             {2, NODE_NETWORK, 1},
             {3, NODE_NETWORK, 1},
             {4, NODE_NETWORK, 1},
             {5, NODE_NETWORK, 1},
             {6, NODE_NETWORK, 1},
             {7, NODE_NETWORK, 1},
             {8, NODE_NETWORK, 1},
             {9, NODE_NETWORK, 1},
             {10, NODE_NETWORK, 1},
             {11, NODE_NETWORK, 1},
         },
         options.m_max_outbound_full_relay, // Expected full-relay outbound count
         0                                  // Expected avalanche outbound count
         ));
 
     // Avalanche outbounds are prioritized, so contiguous full relay outbounds
     // will fail due to network group limitation
     BOOST_CHECK(connman->checkContiguousAddressesConnection(
         {
             // group, services, quantity
             {0, NODE_NETWORK | NODE_AVALANCHE, 1},
             {0, NODE_NETWORK, 3},
         },
         0, // Expected full-relay outbound count
         1  // Expected avalanche outbound count
         ));
 
     // Adding more avalanche outbounds is fine
     BOOST_CHECK(connman->checkContiguousAddressesConnection(
         {
             // group, services, quantity
             {0, NODE_NETWORK | NODE_AVALANCHE, 3},
             {0, NODE_NETWORK, 3},
         },
         0, // Expected full-relay outbound count
         3  // Expected avalanche outbound count
         ));
 
     // Group limit still applies to non-avalanche outbounds, which also remain
     // capped to the max from the connman options.
     BOOST_CHECK(connman->checkContiguousAddressesConnection(
         {
             // group, services, quantity
             {0, NODE_NETWORK | NODE_AVALANCHE, 50},
             {1, NODE_NETWORK, 10},
             {2, NODE_NETWORK, 10},
             {3, NODE_NETWORK, 10},
             {4, NODE_NETWORK, 10},
             {5, NODE_NETWORK, 10},
             {6, NODE_NETWORK, 10},
             {7, NODE_NETWORK, 10},
             {8, NODE_NETWORK, 10},
             {9, NODE_NETWORK, 10},
             {10, NODE_NETWORK, 10},
             {11, NODE_NETWORK, 10},
         },
         options.m_max_outbound_full_relay, // Expected full-relay outbound count
         50                                 // Expected avalanche outbound count
         ));
 
     g_avalanche.reset();
 }
 
 BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) {
     // Tests the following scenario:
     // * -bind=3.4.5.6:20001 is specified
     // * we make an outbound connection to a peer
     // * the peer reports it sees us as 2.3.4.5:20002 in the version message
     //   (20002 is a random port assigned by our OS for the outgoing TCP
     //   connection, we cannot accept connections to it)
     // * we should self-advertise to that peer as 2.3.4.5:20001
 
     // Pretend that we bound to this port.
     const uint16_t bind_port = 20001;
     m_node.args->ForceSetArg("-bind", strprintf("3.4.5.6:%u", bind_port));
     m_node.args->ForceSetArg("-capturemessages", "1");
 
     // Our address:port as seen from the peer - 2.3.4.5:20002 (different from
     // the above).
     in_addr peer_us_addr;
     peer_us_addr.s_addr = htonl(0x02030405);
     const CService peer_us{peer_us_addr, 20002};
 
     // Create a peer with a routable IPv4 address.
     in_addr peer_in_addr;
     peer_in_addr.s_addr = htonl(0x01020304);
     CNode peer{/*id=*/0,
                /*hSocketIn=*/INVALID_SOCKET,
                /*addrIn=*/CAddress{CService{peer_in_addr, 8333}, NODE_NETWORK},
                /*nKeyedNetGroupIn=*/0,
                /*nLocalHostNonceIn=*/0,
                /*nLocalExtraEntropyIn=*/0,
                /*addrBindIn=*/CAddress{},
                /*addrNameIn=*/std::string{},
                /*conn_type_in=*/ConnectionType::OUTBOUND_FULL_RELAY,
                /*inbound_onion=*/false};
 
     const uint64_t services{NODE_NETWORK};
     const int64_t time{0};
     const CNetMsgMaker msg_maker{PROTOCOL_VERSION};
 
     // Force CChainState::IsInitialBlockDownload() to return false.
     // Otherwise PushAddress() isn't called by PeerManager::ProcessMessage().
     TestChainState &chainstate =
         *static_cast<TestChainState *>(&m_node.chainman->ActiveChainstate());
     chainstate.JumpOutOfIbd();
 
     const Config &config = GetConfig();
 
     m_node.peerman->InitializeNode(config, peer, NODE_NETWORK);
 
     std::atomic<bool> interrupt_dummy{false};
     std::chrono::microseconds time_received_dummy{0};
 
     const auto msg_version =
         msg_maker.Make(NetMsgType::VERSION, PROTOCOL_VERSION, services, time,
                        services, peer_us);
     CDataStream msg_version_stream{msg_version.data, SER_NETWORK,
                                    PROTOCOL_VERSION};
 
     m_node.peerman->ProcessMessage(config, peer, NetMsgType::VERSION,
                                    msg_version_stream, time_received_dummy,
                                    interrupt_dummy);
 
     const auto msg_verack = msg_maker.Make(NetMsgType::VERACK);
     CDataStream msg_verack_stream{msg_verack.data, SER_NETWORK,
                                   PROTOCOL_VERSION};
 
     // Will set peer.fSuccessfullyConnected to true (necessary in
     // SendMessages()).
     m_node.peerman->ProcessMessage(config, peer, NetMsgType::VERACK,
                                    msg_verack_stream, time_received_dummy,
                                    interrupt_dummy);
 
     // Ensure that peer_us_addr:bind_port is sent to the peer.
     const CService expected{peer_us_addr, bind_port};
     bool sent{false};
 
     const auto CaptureMessageOrig = CaptureMessage;
     CaptureMessage =
         [&sent, &expected](const CAddress &addr, const std::string &msg_type,
                            Span<const uint8_t> data, bool is_incoming) -> void {
         if (!is_incoming && msg_type == "addr") {
             CDataStream s(data, SER_NETWORK, PROTOCOL_VERSION);
             std::vector<CAddress> addresses;
 
             s >> addresses;
 
             for (const auto &deserialized_addr : addresses) {
                 if (deserialized_addr == expected) {
                     sent = true;
                     return;
                 }
             }
         }
     };
 
     {
         LOCK(peer.cs_sendProcessing);
         m_node.peerman->SendMessages(config, &peer);
     }
 
     BOOST_CHECK(sent);
 
     CaptureMessage = CaptureMessageOrig;
     chainstate.ResetIbd();
     m_node.args->ForceSetArg("-capturemessages", "0");
     m_node.args->ForceSetArg("-bind", "");
     // PeerManager::ProcessMessage() calls AddTimeData() which changes the
     // internal state in timedata.cpp and later confuses the test
     // "timedata_tests/addtimedata". Thus reset that state as it was before our
     // test was run.
     TestOnlyResetTimeData();
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/validation.h b/src/validation.h
index 2d06fcea4..60c93d90b 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -1,1438 +1,1451 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2019 The Bitcoin Core developers
 // Copyright (c) 2017-2020 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_VALIDATION_H
 #define BITCOIN_VALIDATION_H
 
 #if defined(HAVE_CONFIG_H)
 #include <config/bitcoin-config.h>
 #endif
 
 #include <arith_uint256.h>
 #include <attributes.h>
 #include <blockfileinfo.h>
 #include <blockindexcomparators.h>
 #include <bloom.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <config.h>
 #include <consensus/amount.h>
 #include <consensus/consensus.h>
 #include <disconnectresult.h>
 #include <flatfile.h>
 #include <fs.h>
 #include <node/blockstorage.h>
 #include <policy/packages.h>
 #include <script/script_error.h>
 #include <script/script_metrics.h>
 #include <shutdown.h>
 #include <sync.h>
 #include <txdb.h>
 #include <txmempool.h> // For CTxMemPool::cs
 #include <uint256.h>
 #include <util/check.h>
 #include <util/translation.h>
 
 #include <atomic>
 #include <cstdint>
 #include <map>
 #include <memory>
 #include <optional>
 #include <set>
 #include <string>
 #include <thread>
 #include <utility>
 #include <vector>
 
 class BlockPolicyValidationState;
 class CChainParams;
 class Chainstate;
 class ChainstateManager;
 class CScriptCheck;
 class CTxMemPool;
 class CTxUndo;
 class DisconnectedBlockTransactions;
 
 struct ChainTxData;
 struct FlatFilePos;
 struct PrecomputedTransactionData;
 struct LockPoints;
 struct AssumeutxoData;
 namespace node {
 class SnapshotMetadata;
 } // namespace node
 namespace Consensus {
 struct Params;
 } // namespace Consensus
 
 #define MIN_TRANSACTION_SIZE                                                   \
     (::GetSerializeSize(CTransaction(), PROTOCOL_VERSION))
 
 /** Default for -minrelaytxfee, minimum relay fee for transactions */
 static const Amount DEFAULT_MIN_RELAY_TX_FEE_PER_KB(1000 * SATOSHI);
 /** Default for -excessutxocharge for transactions */
 static const Amount DEFAULT_UTXO_FEE = Amount::zero();
 /**
  * Default for -mempoolexpiry, expiration time for mempool transactions in
  * hours.
  */
 static const unsigned int DEFAULT_MEMPOOL_EXPIRY = 336;
 /** Maximum number of dedicated script-checking threads allowed */
 static const int MAX_SCRIPTCHECK_THREADS = 15;
 /** -par default (number of script-checking threads, 0 = auto) */
 static const int DEFAULT_SCRIPTCHECK_THREADS = 0;
 static const int64_t DEFAULT_MAX_TIP_AGE = 24 * 60 * 60;
 static const bool DEFAULT_CHECKPOINTS_ENABLED = true;
 static const bool DEFAULT_TXINDEX = false;
 static constexpr bool DEFAULT_COINSTATSINDEX{false};
 static const char *const DEFAULT_BLOCKFILTERINDEX = "0";
 
 /** Default for -persistmempool */
 static const bool DEFAULT_PERSIST_MEMPOOL = true;
 
 static const bool DEFAULT_PEERBLOOMFILTERS = true;
 
 /** Default for -stopatheight */
 static const int DEFAULT_STOPATHEIGHT = 0;
 /**
  * Block files containing a block-height within MIN_BLOCKS_TO_KEEP of
  * ActiveChain().Tip() will not be pruned.
  */
 static const unsigned int MIN_BLOCKS_TO_KEEP = 288;
 static const signed int DEFAULT_CHECKBLOCKS = 6;
 static const unsigned int DEFAULT_CHECKLEVEL = 3;
 /**
  * Require that the user allocate at least 550 MiB for block & undo files
  * (blk???.dat and rev???.dat)
  * At 1MB per block, 288 blocks = 288MB.
  * Add 15% for Undo data = 331MB
  * Add 20% for Orphan block rate = 397MB
  * We want the low water mark after pruning to be at least 397 MB and since we
  * prune in full block file chunks, we need the high water mark which triggers
  * the prune to be one 128MB block file + added 15% undo data = 147MB greater
  * for a total of 545MB
  * Setting the target to >= 550 MiB will make it likely we can respect the
  * target.
  */
 static const uint64_t MIN_DISK_SPACE_FOR_BLOCK_FILES = 550 * 1024 * 1024;
 
 /** Current sync state passed to tip changed callbacks. */
 enum class SynchronizationState { INIT_REINDEX, INIT_DOWNLOAD, POST_INIT };
 
 extern RecursiveMutex cs_main;
 extern Mutex g_best_block_mutex;
 extern std::condition_variable g_best_block_cv;
 /** Used to notify getblocktemplate RPC of new tips. */
 extern uint256 g_best_block;
 extern bool fRequireStandard;
 extern bool fCheckBlockIndex;
 extern bool fCheckpointsEnabled;
 
 /**
  * A fee rate smaller than this is considered zero fee (for relaying, mining and
  * transaction creation)
  */
 extern CFeeRate minRelayTxFee;
 /**
  * If the tip is older than this (in seconds), the node is considered to be in
  * initial block download.
  */
 extern int64_t nMaxTipAge;
 
 /**
  * Block hash whose ancestors we will assume to have valid scripts without
  * checking them.
  */
 extern BlockHash hashAssumeValid;
 
 /**
  * Minimum work we will assume exists on some valid chain.
  */
 extern arith_uint256 nMinimumChainWork;
 
 /** Documentation for argument 'checklevel'. */
 extern const std::vector<std::string> CHECKLEVEL_DOC;
 
 class BlockValidationOptions {
 private:
     uint64_t excessiveBlockSize;
     bool checkPoW : 1;
     bool checkMerkleRoot : 1;
 
 public:
     // Do full validation by default
     explicit BlockValidationOptions(const Config &config);
     explicit BlockValidationOptions(uint64_t _excessiveBlockSize,
                                     bool _checkPow = true,
                                     bool _checkMerkleRoot = true)
         : excessiveBlockSize(_excessiveBlockSize), checkPoW(_checkPow),
           checkMerkleRoot(_checkMerkleRoot) {}
 
     BlockValidationOptions withCheckPoW(bool _checkPoW = true) const {
         BlockValidationOptions ret = *this;
         ret.checkPoW = _checkPoW;
         return ret;
     }
 
     BlockValidationOptions
     withCheckMerkleRoot(bool _checkMerkleRoot = true) const {
         BlockValidationOptions ret = *this;
         ret.checkMerkleRoot = _checkMerkleRoot;
         return ret;
     }
 
     bool shouldValidatePoW() const { return checkPoW; }
     bool shouldValidateMerkleRoot() const { return checkMerkleRoot; }
     uint64_t getExcessiveBlockSize() const { return excessiveBlockSize; }
 };
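 
 // Example (hypothetical usage; the chainable setters above return modified
 // copies, so a caller holding a `config` can narrow the defaults in a single
 // expression):
 //     const auto options = BlockValidationOptions(config)
 //                              .withCheckPoW(false)
 //                              .withCheckMerkleRoot(false);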
 
 /**
  * Run instances of script checking worker threads
  */
 void StartScriptCheckWorkerThreads(int threads_num);
 
 /**
  * Stop all of the script checking worker threads
  */
 void StopScriptCheckWorkerThreads();
 
 Amount GetBlockSubsidy(int nHeight, const Consensus::Params &consensusParams);
 
 bool AbortNode(BlockValidationState &state, const std::string &strMessage,
                const bilingual_str &userMessage = bilingual_str{});
 
 /**
  * Guess verification progress (as a fraction between 0.0=genesis and
  * 1.0=current tip).
  */
 double GuessVerificationProgress(const ChainTxData &data,
                                  const CBlockIndex *pindex);
 
 /** Prune block files up to a given height */
 void PruneBlockFilesManual(Chainstate &active_chainstate,
                            int nManualPruneHeight);
 
 /**
  * Validation result for a single transaction mempool acceptance.
  */
 struct MempoolAcceptResult {
     /** Used to indicate the results of mempool validation. */
     enum class ResultType {
         //! Fully validated, valid.
         VALID,
         //! Invalid.
         INVALID,
         //! Valid, transaction was already in the mempool.
         MEMPOOL_ENTRY,
     };
     const ResultType m_result_type;
     const TxValidationState m_state;
 
     // The following fields are only present when m_result_type =
     // ResultType::VALID or MEMPOOL_ENTRY
     /**
      * Virtual size as used by the mempool, calculated using serialized size
      * and sigchecks.
      */
     const std::optional<int64_t> m_vsize;
     /** Raw base fees in satoshis. */
     const std::optional<Amount> m_base_fees;
     static MempoolAcceptResult Failure(TxValidationState state) {
         return MempoolAcceptResult(state);
     }
 
     /** Constructor for success case */
     static MempoolAcceptResult Success(int64_t vsize, Amount fees) {
         return MempoolAcceptResult(ResultType::VALID, vsize, fees);
     }
 
     /**
      * Constructor for already-in-mempool case. It wouldn't replace any
      * transactions.
      */
     static MempoolAcceptResult MempoolTx(int64_t vsize, Amount fees) {
         return MempoolAcceptResult(ResultType::MEMPOOL_ENTRY, vsize, fees);
     }
 
     // Private constructors. Use static methods MempoolAcceptResult::Success,
     // etc. to construct.
 private:
     /** Constructor for failure case */
     explicit MempoolAcceptResult(TxValidationState state)
         : m_result_type(ResultType::INVALID), m_state(state),
           m_base_fees(std::nullopt) {
         // Can be invalid or error
         Assume(!state.IsValid());
     }
 
     /** Generic constructor for success cases */
     explicit MempoolAcceptResult(ResultType result_type, int64_t vsize,
                                  Amount fees)
         : m_result_type(result_type), m_vsize{vsize}, m_base_fees(fees) {}
 };
 
 /**
  * Validation result for package mempool acceptance.
  */
 struct PackageMempoolAcceptResult {
     const PackageValidationState m_state;
     /**
      * Map from txid to finished MempoolAcceptResults. The client is
      * responsible for keeping track of the transaction objects themselves.
      * If a result is not present, it means validation was unfinished for that
      * transaction. If there was a package-wide error (see result in m_state),
      * m_tx_results will be empty.
      */
     std::map<const TxId, const MempoolAcceptResult> m_tx_results;
 
     explicit PackageMempoolAcceptResult(
         PackageValidationState state,
         std::map<const TxId, const MempoolAcceptResult> &&results)
         : m_state{state}, m_tx_results(std::move(results)) {}
 
     /**
      * Constructor to create a PackageMempoolAcceptResult from a
      * MempoolAcceptResult
      */
     explicit PackageMempoolAcceptResult(const TxId &txid,
                                         const MempoolAcceptResult &result)
         : m_tx_results{{txid, result}} {}
 };
 
 /**
  * Try to add a transaction to the mempool. This is an internal function and is
  * exposed only for testing. Client code should use
  * ChainstateManager::ProcessTransaction()
  *
  * @param[in]  config             The global configuration.
  * @param[in]  active_chainstate  Reference to the active chainstate.
  * @param[in]  tx                 The transaction to submit for mempool
  *                                acceptance.
  * @param[in]  accept_time        The timestamp for adding the transaction to
  *                                the mempool.
  *                                It is also used to determine when the entry
  *                                expires.
  * @param[in]  bypass_limits      When true, don't enforce mempool fee and
  *                                capacity limits.
  * @param[in]  test_accept        When true, run validation checks but don't
  *                                submit to mempool.
  * @param[in]  heightOverride     Override the block height of the transaction.
  *                                Used only upon reorg.
  *
  * @returns a MempoolAcceptResult indicating whether the transaction was
  *     accepted/rejected with reason.
  */
 MempoolAcceptResult
 AcceptToMemoryPool(const Config &config, Chainstate &active_chainstate,
                    const CTransactionRef &tx, int64_t accept_time,
                    bool bypass_limits, bool test_accept = false,
                    unsigned int heightOverride = 0)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
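
 // A minimal sketch of a test-only dry run (hypothetical names; assumes
 // `config`, a Chainstate `active_chainstate` and a CTransactionRef `tx` are
 // in scope):
 //
 //   LOCK(cs_main);
 //   const MempoolAcceptResult result =
 //       AcceptToMemoryPool(config, active_chainstate, tx, GetTime(),
 //                          /*bypass_limits=*/false, /*test_accept=*/true);
 //   if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
 //       // result.m_vsize and result.m_base_fees are populated on success.
 //   }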
 
 /**
  * Validate (and maybe submit) a package to the mempool.
  * See doc/policy/packages.md for full details on package validation rules.
  *
  * @param[in]    test_accept     When true, run validation checks but don't
  *                               submit to mempool.
  * @returns a PackageMempoolAcceptResult which includes a MempoolAcceptResult
  *     for each transaction. If a transaction fails, validation will exit early
  *     and some results may be missing. It is also possible for the package to
  *     be partially submitted.
  */
 PackageMempoolAcceptResult
 ProcessNewPackage(const Config &config, Chainstate &active_chainstate,
                   CTxMemPool &pool, const Package &txns, bool test_accept)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
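
 // A minimal sketch of inspecting the result (assuming `package_result` was
 // returned by ProcessNewPackage for the Package `txns`): absent entries mean
 // validation never finished for that transaction.
 //
 //   for (const CTransactionRef &tx : txns) {
 //       const auto it = package_result.m_tx_results.find(tx->GetId());
 //       if (it == package_result.m_tx_results.end()) {
 //           continue; // validation exited early; no result for this tx
 //       }
 //       const MempoolAcceptResult &res = it->second;
 //       // Inspect res.m_result_type / res.m_state as needed.
 //   }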
 
 /**
  * Simple class for regulating resource usage during CheckInputScripts (and
  * CScriptCheck), atomic so as to be compatible with parallel validation.
  */
 class CheckInputsLimiter {
 protected:
     std::atomic<int64_t> remaining;
 
 public:
     explicit CheckInputsLimiter(int64_t limit) : remaining(limit) {}
 
     bool consume_and_check(int consumed) {
         auto newvalue = (remaining -= consumed);
         return newvalue >= 0;
     }
 
     bool check() { return remaining >= 0; }
 };
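
 // Consumption sketch: every sigcheck consumed is subtracted atomically from a
 // shared budget, and once the budget goes negative the limiter keeps
 // reporting failure:
 //
 //   CheckInputsLimiter limiter(100);
 //   bool ok = limiter.consume_and_check(60); // true, 40 remaining
 //   ok = limiter.consume_and_check(60);      // false, budget exhausted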
 
 class TxSigCheckLimiter : public CheckInputsLimiter {
 public:
     TxSigCheckLimiter() : CheckInputsLimiter(MAX_TX_SIGCHECKS) {}
 
     // Let's make this bad boy copiable.
     TxSigCheckLimiter(const TxSigCheckLimiter &rhs)
         : CheckInputsLimiter(rhs.remaining.load()) {}
 
     TxSigCheckLimiter &operator=(const TxSigCheckLimiter &rhs) {
         remaining = rhs.remaining.load();
         return *this;
     }
 
     static TxSigCheckLimiter getDisabled() {
         TxSigCheckLimiter txLimiter;
         // Historically, there has not been a transaction with more than 20k sig
         // checks on testnet or mainnet, so this effectively disables sigchecks.
         txLimiter.remaining = 20000;
         return txLimiter;
     }
 };
 
 class ConnectTrace;
 
 /**
  * Check whether all of this transaction's input scripts succeed.
  *
  * This involves ECDSA signature checks so can be computationally intensive.
  * This function should only be called after the cheap sanity checks in
  * CheckTxInputs passed.
  *
  * If pvChecks is not nullptr, script checks are pushed onto it instead of being
  * performed inline. Any script checks which are not necessary (eg due to script
  * execution cache hits) are, obviously, not pushed onto pvChecks/run.
  *
  * Upon success nSigChecksOut will be filled in with either:
  * - correct total for all inputs, or,
  * - 0, in the case when checks were pushed onto pvChecks (i.e., a cache miss
  * with pvChecks non-null), in which case the total can be found by executing
  * pvChecks and adding the results.
  *
  * Setting sigCacheStore/scriptCacheStore to false will remove elements from the
  * corresponding cache which are matched. This is useful for checking blocks
  * where we will likely never need the cache entry again.
  *
  * pLimitSigChecks can be passed to limit the sigchecks count either in parallel
  * or serial validation. With pvChecks null (serial validation), breaking the
  * pLimitSigChecks limit will abort evaluation early and return false. With
  * pvChecks not-null (parallel validation): the cached nSigChecks may itself
  * break the limit in which case false is returned, OR, each entry in the
  * returned pvChecks must be executed exactly once in order to probe the limit
  * accurately.
  */
 bool CheckInputScripts(const CTransaction &tx, TxValidationState &state,
                        const CCoinsViewCache &view, const uint32_t flags,
                        bool sigCacheStore, bool scriptCacheStore,
                        const PrecomputedTransactionData &txdata,
                        int &nSigChecksOut, TxSigCheckLimiter &txLimitSigChecks,
                        CheckInputsLimiter *pBlockLimitSigChecks,
                        std::vector<CScriptCheck> *pvChecks)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * Handy shortcut to full fledged CheckInputScripts call.
  */
 static inline bool
 CheckInputScripts(const CTransaction &tx, TxValidationState &state,
                   const CCoinsViewCache &view, const uint32_t flags,
                   bool sigCacheStore, bool scriptCacheStore,
                   const PrecomputedTransactionData &txdata, int &nSigChecksOut)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     TxSigCheckLimiter nSigChecksTxLimiter;
     return CheckInputScripts(tx, state, view, flags, sigCacheStore,
                              scriptCacheStore, txdata, nSigChecksOut,
                              nSigChecksTxLimiter, nullptr, nullptr);
 }
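
 // A parallel-validation sketch for the full-fledged overload above
 // (hypothetical in-scope names for the limiters, view and txdata): checks are
 // pushed onto `vChecks` instead of being run inline, and each entry must be
 // executed exactly once to obtain the complete sigcheck count.
 //
 //   std::vector<CScriptCheck> vChecks;
 //   int nSigChecksOut = 0;
 //   if (!CheckInputScripts(tx, state, view, flags, /*sigCacheStore=*/false,
 //                          /*scriptCacheStore=*/true, txdata, nSigChecksOut,
 //                          txLimitSigChecks, &blockLimitSigChecks, &vChecks)) {
 //       // Either the cached sigcheck count already broke the limit, or a
 //       // cheap check failed before any scripts were queued.
 //   }
 //   // Hand vChecks to the script-check worker threads for execution.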
 
 /**
  * Mark all the coins corresponding to a given transaction inputs as spent.
  */
 void SpendCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                 int nHeight);
 
 /**
  * Apply the effects of this transaction on the UTXO set represented by view.
  */
 void UpdateCoins(CCoinsViewCache &view, const CTransaction &tx, CTxUndo &txundo,
                  int nHeight);
 
 /**
  * Check if transaction will be BIP68 final in the next block to be created on
  * top of tip.
  * @param[in]   tip             Chain tip to check tx sequence locks against.
  *     For example, the tip of the current active chain.
  * @param[in]   coins_view      Any CCoinsView that provides access to the
  *     relevant coins for checking sequence locks. For example, it can be a
  *     CCoinsViewCache that isn't connected to anything but contains all the
  *     relevant coins, or a CCoinsViewMemPool that is connected to the mempool
  *     and chainstate UTXO set. In the latter case, the caller is responsible
  *     for holding the appropriate locks to ensure that calls to GetCoin()
  *     return correct coins.
  * Simulates calling SequenceLocks() with data from the tip passed in.
  * Optionally stores in LockPoints the calculated height and time, plus the
  * hash of the block needed for the calculation, or skips the calculation and
  * uses the LockPoints passed in for evaluation. The LockPoints should not be
  * considered valid if CheckSequenceLocksAtTip returns false.
  */
 bool CheckSequenceLocksAtTip(CBlockIndex *tip, const CCoinsView &coins_view,
                              const CTransaction &tx, LockPoints *lp = nullptr,
                              bool useExistingLockPoints = false);
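
 // A minimal mempool-acceptance sketch (hypothetical names; assumes cs_main
 // and the mempool lock are held so GetCoin() returns consistent results):
 //
 //   CCoinsViewMemPool view_mempool(&active_chainstate.CoinsTip(), pool);
 //   LockPoints lp;
 //   if (!CheckSequenceLocksAtTip(active_chainstate.m_chain.Tip(), view_mempool,
 //                                tx, &lp)) {
 //       // The transaction would not be BIP68-final in the next block.
 //   }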
 
 /**
  * Closure representing one script verification.
  * Note that this stores references to the spending transaction.
  *
  * Note that if pLimitSigChecks is passed, then failure does not imply that
  * scripts have failed.
  */
 class CScriptCheck {
 private:
     CTxOut m_tx_out;
     const CTransaction *ptxTo;
     unsigned int nIn;
     uint32_t nFlags;
     bool cacheStore;
     ScriptError error;
     ScriptExecutionMetrics metrics;
     PrecomputedTransactionData txdata;
     TxSigCheckLimiter *pTxLimitSigChecks;
     CheckInputsLimiter *pBlockLimitSigChecks;
 
 public:
     CScriptCheck()
         : ptxTo(nullptr), nIn(0), nFlags(0), cacheStore(false),
           error(ScriptError::UNKNOWN), txdata(), pTxLimitSigChecks(nullptr),
           pBlockLimitSigChecks(nullptr) {}
 
     CScriptCheck(const CTxOut &outIn, const CTransaction &txToIn,
                  unsigned int nInIn, uint32_t nFlagsIn, bool cacheIn,
                  const PrecomputedTransactionData &txdataIn,
                  TxSigCheckLimiter *pTxLimitSigChecksIn = nullptr,
                  CheckInputsLimiter *pBlockLimitSigChecksIn = nullptr)
         : m_tx_out(outIn), ptxTo(&txToIn), nIn(nInIn), nFlags(nFlagsIn),
           cacheStore(cacheIn), error(ScriptError::UNKNOWN), txdata(txdataIn),
           pTxLimitSigChecks(pTxLimitSigChecksIn),
           pBlockLimitSigChecks(pBlockLimitSigChecksIn) {}
 
     bool operator()();
 
     void swap(CScriptCheck &check) noexcept {
         std::swap(ptxTo, check.ptxTo);
         std::swap(m_tx_out, check.m_tx_out);
         std::swap(nIn, check.nIn);
         std::swap(nFlags, check.nFlags);
         std::swap(cacheStore, check.cacheStore);
         std::swap(error, check.error);
         std::swap(metrics, check.metrics);
         std::swap(txdata, check.txdata);
         std::swap(pTxLimitSigChecks, check.pTxLimitSigChecks);
         std::swap(pBlockLimitSigChecks, check.pBlockLimitSigChecks);
     }
 
     ScriptError GetScriptError() const { return error; }
 
     ScriptExecutionMetrics GetScriptExecutionMetrics() const { return metrics; }
 };
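
 // Deferred-execution sketch (hypothetical in-scope names): a CScriptCheck
 // captures everything needed to verify one input, so it can be run inline or
 // queued for a worker thread.
 //
 //   CScriptCheck check(coin.GetTxOut(), tx, i, flags, cacheSigStore, txdata,
 //                      &txLimitSigChecks, pBlockLimitSigChecks);
 //   if (!check()) {
 //       // The failure reason is available via check.GetScriptError().
 //   }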
 
 /** Functions for validating blocks and updating the block tree */
 
 /**
  * Context-independent validity checks.
  *
  * Returns true if the provided block is valid (has valid header,
  * transactions are valid, block is a valid size, etc.)
  */
 bool CheckBlock(const CBlock &block, BlockValidationState &state,
                 const Consensus::Params &params,
                 BlockValidationOptions validationOptions);
 
 /**
  * This is a variant of ContextualCheckTransaction which computes the contextual
  * check for a transaction based on the chain tip.
  *
  * See consensus/consensus.h for flag definitions.
  */
 bool ContextualCheckTransactionForCurrentBlock(
     const CBlockIndex *active_chain_tip, const Consensus::Params &params,
     const CTransaction &tx, TxValidationState &state)
     EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
 /**
  * Check a block is completely valid from start to finish (only works on top of
  * our current best block)
  */
 bool TestBlockValidity(BlockValidationState &state, const CChainParams &params,
                        Chainstate &chainstate, const CBlock &block,
                        CBlockIndex *pindexPrev,
                        BlockValidationOptions validationOptions)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
 /**
  * RAII wrapper for VerifyDB: Verify consistency of the block and coin
  * databases.
  */
 class CVerifyDB {
 public:
     CVerifyDB();
 
     ~CVerifyDB();
 
     bool VerifyDB(Chainstate &chainstate, const Config &config,
                   CCoinsView &coinsview, int nCheckLevel, int nCheckDepth)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 };
 
 /** @see Chainstate::FlushStateToDisk */
 enum class FlushStateMode { NONE, IF_NEEDED, PERIODIC, ALWAYS };
 
 /**
  * A convenience class for constructing the CCoinsView* hierarchy used
  * to facilitate access to the UTXO set.
  *
  * This class consists of an arrangement of layered CCoinsView objects,
  * preferring to store and retrieve coins in memory via `m_cacheview` but
  * ultimately falling back on cache misses to the canonical store of UTXOs on
  * disk, `m_dbview`.
  */
 class CoinsViews {
 public:
     //! The lowest level of the CoinsViews cache hierarchy sits in a leveldb
     //! database on disk. All unspent coins reside in this store.
     CCoinsViewDB m_dbview GUARDED_BY(cs_main);
 
     //! This view wraps access to the leveldb instance and handles read errors
     //! gracefully.
     CCoinsViewErrorCatcher m_catcherview GUARDED_BY(cs_main);
 
     //! This is the top layer of the cache hierarchy - it keeps as many coins in
     //! memory as can fit per the dbcache setting.
     std::unique_ptr<CCoinsViewCache> m_cacheview GUARDED_BY(cs_main);
 
     //! This constructor initializes CCoinsViewDB and CCoinsViewErrorCatcher
     //! instances, but it *does not* create a CCoinsViewCache instance by
     //! default. This is done separately because the presence of the cache has
     //! implications on whether or not we're allowed to flush the cache's state
     //! to disk, which should not be done until the health of the database is
     //! verified.
     //!
     //! All arguments forwarded onto CCoinsViewDB.
     CoinsViews(std::string ldb_name, size_t cache_size_bytes, bool in_memory,
                bool should_wipe);
 
     //! Initialize the CCoinsViewCache member.
     void InitCache() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 };
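
 // Two-phase initialization sketch, matching the constructor comment above
 // (sizes and flags are illustrative): the cache is only created once the
 // on-disk database has been verified.
 //
 //   CoinsViews views("chainstate", /*cache_size_bytes=*/2 << 20,
 //                    /*in_memory=*/false, /*should_wipe=*/false);
 //   // ... verify the health of the database ...
 //   WITH_LOCK(::cs_main, views.InitCache());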
 
 enum class CoinsCacheSizeState {
     //! The coins cache is in immediate need of a flush.
     CRITICAL = 2,
     //! The cache is at >= 90% capacity.
     LARGE = 1,
     OK = 0
 };
 
 /**
  * Chainstate stores and provides an API to update our local knowledge of the
  * current best chain.
  *
  * Eventually, the API here is targeted at being exposed externally as a
  * consumable libconsensus library, so any functions added must only call
  * other class member functions, pure functions in other parts of the consensus
  * library, callbacks via the validation interface, or read/write-to-disk
  * functions (eventually this will also be via callbacks).
  *
  * Anything that is contingent on the current tip of the chain is stored here,
  * whereas block information and metadata independent of the current tip is
  * kept in `BlockManager`.
  */
 class Chainstate {
 protected:
     /**
      * The ChainState Mutex.
      * A lock that must be held when modifying this ChainState.
      */
     Mutex m_chainstate_mutex;
 
     /**
      * Every received block is assigned a unique and increasing identifier, so
      * we know which one to give priority in case of a fork.
      * Blocks loaded from disk are assigned id 0, so start the counter at 1.
      */
     std::atomic<int32_t> nBlockSequenceId{1};
     /** Decreasing counter (used by subsequent preciousblock calls). */
     int32_t nBlockReverseSequenceId = -1;
     /** chainwork for the last block that preciousblock has been applied to. */
     arith_uint256 nLastPreciousChainwork = 0;
 
     /**
      * Whether this chainstate is undergoing initial block download.
      *
      * Mutable because we need to be able to mark IsInitialBlockDownload()
      * const, which latches this for caching purposes.
      */
     mutable std::atomic<bool> m_cached_finished_ibd{false};
 
     //! Optional mempool that is kept in sync with the chain.
     //! Only the active chainstate has a mempool.
     CTxMemPool *m_mempool;
 
     //! Manages the UTXO set, which is a reflection of the contents of
     //! `m_chain`.
     std::unique_ptr<CoinsViews> m_coins_views;
 
     //! This toggle exists for use when doing background validation for UTXO
     //! snapshots.
     //!
     //! In the expected case, it is set once the background validation chain
     //! reaches the same height as the base of the snapshot and its UTXO set is
     //! found to hash to the expected assumeutxo value. It signals that we
     //! should no longer connect blocks to the background chainstate. When set
     //! on the background validation chainstate, it signifies that we have fully
     //! validated the snapshot chainstate.
     //!
     //! In the unlikely case that the snapshot chainstate is found to be
     //! invalid, this is set to true on the snapshot chainstate.
     bool m_disabled GUARDED_BY(::cs_main){false};
 
     mutable Mutex cs_avalancheFinalizedBlockIndex;
 
     /**
      * The best block via avalanche voting.
      * This block cannot be reorged in any way except by explicit user action.
      */
     const CBlockIndex *m_avalancheFinalizedBlockIndex
         GUARDED_BY(cs_avalancheFinalizedBlockIndex) = nullptr;
 
     /**
      * Filter to prevent parking a block due to block policies more than once.
      * After first application of block policies, Avalanche voting will
      * determine the final acceptance state. Rare false positives will be
      * reconciled by the network and should not have any negative impact.
      */
     CRollingBloomFilter m_filterParkingPoliciesApplied =
         CRollingBloomFilter{1000, 0.000001};
 
     CBlockIndex const *m_best_fork_tip = nullptr;
     CBlockIndex const *m_best_fork_base = nullptr;
 
 public:
     //! Reference to a BlockManager instance which itself is shared across all
     //! Chainstate instances.
     node::BlockManager &m_blockman;
 
     /** Chain parameters for this chainstate */
     const CChainParams &m_params;
 
     //! The chainstate manager that owns this chainstate. The reference is
     //! necessary so that this instance can check whether it is the active
     //! chainstate within deeply nested method calls.
     ChainstateManager &m_chainman;
 
     explicit Chainstate(
         CTxMemPool *mempool, node::BlockManager &blockman,
         ChainstateManager &chainman,
         std::optional<BlockHash> from_snapshot_blockhash = std::nullopt);
 
     /**
      * Initialize the CoinsViews UTXO set database management data structures.
      * The in-memory cache is initialized separately.
      *
      * All parameters forwarded to CoinsViews.
      */
     void InitCoinsDB(size_t cache_size_bytes, bool in_memory, bool should_wipe,
                      std::string leveldb_name = "chainstate");
 
     //! Initialize the in-memory coins cache (to be done after the health of the
     //! on-disk database is verified).
     void InitCoinsCache(size_t cache_size_bytes)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! @returns whether or not the CoinsViews object has been fully initialized
     //! and we can safely flush this object to disk.
     bool CanFlushToDisk() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
         AssertLockHeld(::cs_main);
         return m_coins_views && m_coins_views->m_cacheview;
     }
 
     //! The current chain of blockheaders we consult and build on.
     //! @see CChain, CBlockIndex.
     CChain m_chain;
 
     /**
      * The blockhash which is the base of the snapshot this chainstate was
      * created from.
      *
      * std::nullopt if this chainstate was not created from a snapshot.
      */
     const std::optional<BlockHash> m_from_snapshot_blockhash{};
 
     //! Return true if this chainstate relies on blocks that are assumed-valid.
     //! In practice this means it was created based on a UTXO snapshot.
     bool reliesOnAssumedValid() {
         return m_from_snapshot_blockhash.has_value();
     }
 
     /**
      * The set of all CBlockIndex entries with either BLOCK_VALID_TRANSACTIONS
      * (for itself and all ancestors) *or* BLOCK_ASSUMED_VALID (if using
      * background chainstates) and as good as our current tip or better.
      * Entries may be failed, though, and pruning nodes may be missing the data
      * for the block.
      */
     std::set<CBlockIndex *, CBlockIndexWorkComparator> setBlockIndexCandidates;
 
     //! @returns A reference to the in-memory cache of the UTXO set.
     CCoinsViewCache &CoinsTip() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
         AssertLockHeld(::cs_main);
         Assert(m_coins_views);
         return *Assert(m_coins_views->m_cacheview);
     }
 
     //! @returns A reference to the on-disk UTXO set database.
     CCoinsViewDB &CoinsDB() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
         AssertLockHeld(::cs_main);
         return Assert(m_coins_views)->m_dbview;
     }
 
     //! @returns A pointer to the mempool.
     CTxMemPool *GetMempool() { return m_mempool; }
 
     //! @returns A reference to a wrapped view of the in-memory UTXO set that
     //!     handles disk read errors gracefully.
     CCoinsViewErrorCatcher &CoinsErrorCatcher()
         EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
         AssertLockHeld(::cs_main);
         return Assert(m_coins_views)->m_catcherview;
     }
 
     //! Destructs all objects related to accessing the UTXO set.
     void ResetCoinsViews() { m_coins_views.reset(); }
 
     //! Does this chainstate have a UTXO set attached?
     bool HasCoinsViews() const { return (bool)m_coins_views; }
 
     //! The cache size of the on-disk coins view.
     size_t m_coinsdb_cache_size_bytes{0};
 
     //! The cache size of the in-memory coins view.
     size_t m_coinstip_cache_size_bytes{0};
 
     //! Resize the CoinsViews caches dynamically and flush state to disk.
     //! @returns true unless an error occurred during the flush.
     bool ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     /** Import blocks from an external file */
     void LoadExternalBlockFile(const Config &config, FILE *fileIn,
                                FlatFilePos *dbp = nullptr)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
+                                 !cs_avalancheFinalizedBlockIndex);
 
     /**
      * Update the on-disk chain state.
      * Depending on the mode we're called with, the caches and indexes are
      * flushed if they're too large, if it's been a while since the last
      * write, or unconditionally if we're in prune mode and are deleting
      * files.
      *
      * If FlushStateMode::NONE is used, then FlushStateToDisk(...) won't do
      * anything besides checking if we need to prune.
      *
      * @returns true unless a system error occurred
      */
     bool FlushStateToDisk(BlockValidationState &state, FlushStateMode mode,
                           int nManualPruneHeight = 0);
 
     //! Unconditionally flush all changes to disk.
     void ForceFlushStateToDisk();
 
     //! Prune blockfiles from the disk if necessary and then flush chainstate
     //! changes if we pruned.
     void PruneAndFlush();
 
     /**
      * Find the best known block, and make it the tip of the block chain. The
      * result is either failure or an activated best chain. pblock is either
      * nullptr or a pointer to a block that is already loaded (to avoid loading
      * it again from disk).
      *
      * ActivateBestChain is split into steps (see ActivateBestChainStep) so that
      * we avoid holding cs_main for an extended period of time; the length of
      * this call may be quite long during reindexing or a substantial reorg.
      *
      * May not be called with cs_main held. May not be called in a
      * validationinterface callback.
      *
      * Note that if this is called while a snapshot chainstate is active, and if
      * it is called on a background chainstate whose tip has reached the base
      * block of the snapshot, its execution will take *MINUTES* while it hashes
      * the background UTXO set to verify the assumeutxo value the snapshot was
      * activated with. `cs_main` will be held during this time.
      *
      * @returns true unless a system error occurred
      */
     bool ActivateBestChain(const Config &config, BlockValidationState &state,
                            std::shared_ptr<const CBlock> pblock = nullptr)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex) LOCKS_EXCLUDED(cs_main);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
+                                 !cs_avalancheFinalizedBlockIndex)
+            LOCKS_EXCLUDED(cs_main);
 
     bool AcceptBlock(const Config &config,
                      const std::shared_ptr<const CBlock> &pblock,
                      BlockValidationState &state, bool fRequested,
                      const FlatFilePos *dbp, bool *fNewBlock)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     // Block (dis)connection on a given view:
     DisconnectResult DisconnectBlock(const CBlock &block,
                                      const CBlockIndex *pindex,
                                      CCoinsViewCache &view)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
     bool ConnectBlock(const CBlock &block, BlockValidationState &state,
                       CBlockIndex *pindex, CCoinsViewCache &view,
                       BlockValidationOptions options,
                       Amount *blockFees = nullptr, bool fJustCheck = false)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     // Apply the effects of a block disconnection on the UTXO set.
     bool DisconnectTip(BlockValidationState &state,
                        DisconnectedBlockTransactions *disconnectpool)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs);
 
     // Manual block validity manipulation:
     /**
      * Mark a block as precious and reorganize.
      *
      * May not be called in a validationinterface callback.
      */
     bool PreciousBlock(const Config &config, BlockValidationState &state,
                        CBlockIndex *pindex)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex) LOCKS_EXCLUDED(cs_main);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
+                                 !cs_avalancheFinalizedBlockIndex)
+            LOCKS_EXCLUDED(cs_main);
     /** Mark a block as invalid. */
     bool InvalidateBlock(const Config &config, BlockValidationState &state,
                          CBlockIndex *pindex) LOCKS_EXCLUDED(cs_main)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
+                                 !cs_avalancheFinalizedBlockIndex);
     /** Park a block. */
     bool ParkBlock(const Config &config, BlockValidationState &state,
                    CBlockIndex *pindex) LOCKS_EXCLUDED(cs_main)
-        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex);
+        EXCLUSIVE_LOCKS_REQUIRED(!m_chainstate_mutex,
+                                 !cs_avalancheFinalizedBlockIndex);
 
     /**
      * Mark a block as finalized by avalanche.
      */
-    bool AvalancheFinalizeBlock(CBlockIndex *pindex);
+    bool AvalancheFinalizeBlock(CBlockIndex *pindex)
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_avalancheFinalizedBlockIndex);
 
     /**
      * Clear avalanche finalization.
      */
-    void ClearAvalancheFinalizedBlock();
+    void ClearAvalancheFinalizedBlock()
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_avalancheFinalizedBlockIndex);
 
     /**
      * Checks if a block is finalized by avalanche voting.
      */
-    bool IsBlockAvalancheFinalized(const CBlockIndex *pindex) const;
+    bool IsBlockAvalancheFinalized(const CBlockIndex *pindex) const
+        EXCLUSIVE_LOCKS_REQUIRED(!cs_avalancheFinalizedBlockIndex);
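
     // Locking sketch: the three methods above take
     // cs_avalancheFinalizedBlockIndex internally, hence the negative lock
     // annotations requiring callers not to hold it. A typical caller-side
     // flow (hypothetical names):
     //
     //   if (!chainstate.IsBlockAvalancheFinalized(pindex)) {
     //       chainstate.AvalancheFinalizeBlock(pindex);
     //   }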
 
     /** Remove invalidity status from a block and its descendants. */
     void ResetBlockFailureFlags(CBlockIndex *pindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     template <typename F>
     bool UpdateFlagsForBlock(CBlockIndex *pindexBase, CBlockIndex *pindex, F f)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     template <typename F, typename C, typename AC>
     void UpdateFlags(CBlockIndex *pindex, CBlockIndex *&pindexReset, F f,
                      C fChild, AC fAncestorWasChanged)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /** Remove parked status from a block and its descendants. */
     void UnparkBlockAndChildren(CBlockIndex *pindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /** Remove parked status from a block. */
     void UnparkBlock(CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /** Replay blocks that aren't fully applied to the database. */
     bool ReplayBlocks();
 
     /**
      * Ensures we have a genesis block in the block tree, possibly writing one
      * to disk.
      */
     bool LoadGenesisBlock();
 
     void PruneBlockIndexCandidates();
 
     void UnloadBlockIndex() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Check whether we are doing an initial block download (synchronizing from
      * disk or network)
      */
     bool IsInitialBlockDownload() const;
 
     /** Find the last common block of this chain and a locator. */
     const CBlockIndex *FindForkInGlobalIndex(const CBlockLocator &locator) const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Make various assertions about the state of the block index.
      *
      * By default this only executes fully when using the Regtest chain; see:
      * fCheckBlockIndex.
      */
     void CheckBlockIndex();
 
     /** Load the persisted mempool from disk */
     void LoadMempool(const Config &config, const ArgsManager &args);
 
     /** Update the chain tip based on database information, i.e. CoinsTip()'s
      * best block. */
     bool LoadChainTip() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     //! Dictates whether we need to flush the cache to disk or not.
     //!
     //! @return the state of the size of the coins cache.
     CoinsCacheSizeState GetCoinsCacheSizeState()
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     CoinsCacheSizeState
     GetCoinsCacheSizeState(size_t max_coins_cache_size_bytes,
                            size_t max_mempool_size_bytes)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     std::string ToString() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! Indirection necessary to make lock annotations work with an optional
     //! mempool.
     RecursiveMutex *MempoolMutex() const LOCK_RETURNED(m_mempool->cs) {
         return m_mempool ? &m_mempool->cs : nullptr;
     }
 
 private:
     bool ActivateBestChainStep(const Config &config,
                                BlockValidationState &state,
                                CBlockIndex *pindexMostWork,
                                const std::shared_ptr<const CBlock> &pblock,
                                bool &fInvalidFound, ConnectTrace &connectTrace)
-        EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs);
+        EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs,
+                                 !cs_avalancheFinalizedBlockIndex);
     bool ConnectTip(const Config &config, BlockValidationState &state,
                     BlockPolicyValidationState &blockPolicyState,
                     CBlockIndex *pindexNew,
                     const std::shared_ptr<const CBlock> &pblock,
                     ConnectTrace &connectTrace,
                     DisconnectedBlockTransactions &disconnectpool)
-        EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs);
+        EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_mempool->cs,
+                                 !cs_avalancheFinalizedBlockIndex);
     void InvalidBlockFound(CBlockIndex *pindex,
                            const BlockValidationState &state)
-        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+        EXCLUSIVE_LOCKS_REQUIRED(cs_main, !cs_avalancheFinalizedBlockIndex);
     CBlockIndex *
     FindMostWorkChain(std::vector<const CBlockIndex *> &blocksToReconcile)
-        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+        EXCLUSIVE_LOCKS_REQUIRED(cs_main, !cs_avalancheFinalizedBlockIndex);
     void ReceivedBlockTransactions(const CBlock &block, CBlockIndex *pindexNew,
                                    const FlatFilePos &pos)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     bool RollforwardBlock(const CBlockIndex *pindex, CCoinsViewCache &inputs)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     void UnparkBlockImpl(CBlockIndex *pindex, bool fClearChildren)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     bool UnwindBlock(const Config &config, BlockValidationState &state,
                      CBlockIndex *pindex, bool invalidate)
-        EXCLUSIVE_LOCKS_REQUIRED(m_chainstate_mutex);
+        EXCLUSIVE_LOCKS_REQUIRED(m_chainstate_mutex,
+                                 !cs_avalancheFinalizedBlockIndex);
 
     void CheckForkWarningConditions() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     void CheckForkWarningConditionsOnNewFork(CBlockIndex *pindexNewForkTip)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     void InvalidChainFound(CBlockIndex *pindexNew)
-        EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+        EXCLUSIVE_LOCKS_REQUIRED(cs_main, !cs_avalancheFinalizedBlockIndex);
 
     const CBlockIndex *FindBlockToFinalize(CBlockIndex *pindexNew)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Check warning conditions and do some notifications on new chain tip set.
      */
     void UpdateTip(const CBlockIndex *pindexNew)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     /**
      * In case of an invalid snapshot, rename the coins leveldb directory so
      * that it can be examined for issue diagnosis.
      */
     void InvalidateCoinsDBOnDisk() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     friend ChainstateManager;
 };
 
 enum class SnapshotCompletionResult {
     SUCCESS,
     SKIPPED,
 
     // Expected assumeutxo configuration data is not found for the height of the
     // base block.
     MISSING_CHAINPARAMS,
 
     // Failed to generate UTXO statistics (to check UTXO set hash) for the
     // background chainstate.
     STATS_FAILED,
 
     // The UTXO set hash of the background validation chainstate does not match
     // the one expected by assumeutxo chainparams.
     HASH_MISMATCH,
 
     // The blockhash of the current tip of the background validation chainstate
     // does not match the one expected by the snapshot chainstate.
     BASE_BLOCKHASH_MISMATCH,
 };
 
 /**
  * Provides an interface for creating and interacting with one or two
  * chainstates: an IBD chainstate generated by downloading blocks, and
  * an optional snapshot chainstate loaded from a UTXO snapshot. Managed
  * chainstates can be maintained at different heights simultaneously.
  *
  * This class provides abstractions that allow the retrieval of the current
  * most-work chainstate ("Active") as well as chainstates which may be in
  * background use to validate UTXO snapshots.
  *
  * Definitions:
  *
  * *IBD chainstate*: a chainstate whose current state has been "fully"
  *   validated by the initial block download process.
  *
  * *Snapshot chainstate*: a chainstate populated by loading in an
  *    assumeutxo UTXO snapshot.
  *
  * *Active chainstate*: the chainstate containing the current most-work
  *    chain. Consulted by most parts of the system (net_processing,
  *    wallet) as a reflection of the current chain and UTXO set.
  *    This may either be an IBD chainstate or a snapshot chainstate.
  *
  * *Background IBD chainstate*: an IBD chainstate for which the
  *    IBD process is happening in the background while use of the
  *    active (snapshot) chainstate allows the rest of the system to function.
  */
 class ChainstateManager {
 private:
     //! The chainstate used under normal operation (i.e. "regular" IBD) or, if
     //! a snapshot is in use, for background validation.
     //!
     //! Its contents (including on-disk data) will be deleted *upon shutdown*
     //! after background validation of the snapshot has completed. We do not
     //! free the chainstate contents immediately after it finishes validation
     //! to cautiously avoid a case where some other part of the system is still
     //! using this pointer (e.g. net_processing).
     //!
     //! Once this pointer is set to a corresponding chainstate, it will not
     //! be reset until init.cpp:Shutdown().
     //!
     //! This is especially important when, e.g., calling ActivateBestChain()
     //! on all chainstates because we are not able to hold ::cs_main going into
     //! that call.
     std::unique_ptr<Chainstate> m_ibd_chainstate GUARDED_BY(::cs_main);
 
     //! A chainstate initialized on the basis of a UTXO snapshot. If this is
     //! non-null, it is always our active chainstate.
     //!
     //! Once this pointer is set to a corresponding chainstate, it will not
     //! be reset until init.cpp:Shutdown().
     //!
     //! This is especially important when, e.g., calling ActivateBestChain()
     //! on all chainstates because we are not able to hold ::cs_main going into
     //! that call.
     std::unique_ptr<Chainstate> m_snapshot_chainstate GUARDED_BY(::cs_main);
 
     //! Points to either the ibd or snapshot chainstate; indicates our
     //! most-work chain.
     //!
     //! Once this pointer is set to a corresponding chainstate, it will not
     //! be reset until init.cpp:Shutdown().
     //!
     //! This is especially important when, e.g., calling ActivateBestChain()
     //! on all chainstates because we are not able to hold ::cs_main going into
     //! that call.
     Chainstate *m_active_chainstate GUARDED_BY(::cs_main){nullptr};
 
     CBlockIndex *m_best_invalid GUARDED_BY(::cs_main){nullptr};
     CBlockIndex *m_best_parked GUARDED_BY(::cs_main){nullptr};
 
     const Config &m_config;
 
     //! Internal helper for ActivateSnapshot().
     [[nodiscard]] bool
     PopulateAndValidateSnapshot(Chainstate &snapshot_chainstate,
                                 AutoFile &coins_file,
                                 const node::SnapshotMetadata &metadata);
     /**
      * If a block header hasn't already been seen, call CheckBlockHeader on it,
      * ensure that it doesn't descend from an invalid block, and then add it to
      * m_block_index.
      */
     bool AcceptBlockHeader(const Config &config, const CBlockHeader &block,
                            BlockValidationState &state, CBlockIndex **ppindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     friend Chainstate;
 
     //! Returns nullptr if no snapshot has been loaded.
     const CBlockIndex *GetSnapshotBaseBlock() const
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! Return the height of the base block of the snapshot in use, if one
     //! exists, else nullopt.
     std::optional<int> GetSnapshotBaseHeight() const
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! Return true if a chainstate is considered usable.
     //!
     //! This is false when a background validation chainstate has completed its
     //! validation of an assumed-valid chainstate, or when a snapshot
     //! chainstate has been found to be invalid.
     bool IsUsable(const Chainstate *const pchainstate) const
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
         return pchainstate && !pchainstate->m_disabled;
     }
 
 public:
     explicit ChainstateManager(const Config &config) : m_config{config} {}
 
     const CChainParams &GetParams() const { return m_config.GetChainParams(); }
     const Consensus::Params &GetConsensus() const {
         return m_config.GetChainParams().GetConsensus();
     }
 
     /**
      * Alias for ::cs_main.
      * Should be used in new code to make it easier to make ::cs_main a member
      * of this class.
      * Generally, methods of this class should be annotated to require this
      * mutex. This will make calling code more verbose, but also help to:
      * - Clarify that the method will acquire a mutex that heavily affects
      *   overall performance.
      * - Force call sites to think how long they need to acquire the mutex to
      *   get consistent results.
      */
     RecursiveMutex &GetMutex() const LOCK_RETURNED(::cs_main) {
         return ::cs_main;
     }
 
     std::thread m_load_block;
     //! A single BlockManager instance is shared across each constructed
     //! chainstate to avoid duplicating block metadata.
     node::BlockManager m_blockman;
 
     /**
      * In order to efficiently track invalidity of headers, we keep the set of
      * blocks which we tried to connect and found to be invalid here (ie which
      * were set to BLOCK_FAILED_VALID since the last restart). We can then
      * walk this set and check if a new header is a descendant of something in
      * this set, preventing us from having to walk m_block_index when we try
      * to connect a bad block and fail.
      *
      * While this is more complicated than marking everything which descends
      * from an invalid block as invalid at the time we discover it to be
      * invalid, doing so would require walking all of m_block_index to find all
      * descendants. Since this case should be very rare, keeping track of all
      * BLOCK_FAILED_VALID blocks in a set should be just fine and work just as
      * well.
      *
      * Because we already walk m_block_index in height-order at startup, we go
      * ahead and mark descendants of invalid blocks as FAILED_CHILD at that
      * time, instead of putting things in this set.
      */
     std::set<CBlockIndex *> m_failed_blocks;
 
     /**
      * Best header we've seen so far (used for getheaders queries' starting
      * points).
      */
     CBlockIndex *m_best_header = nullptr;
 
     //! The total number of bytes available for us to use across all in-memory
     //! coins caches. This will be split somehow across chainstates.
     int64_t m_total_coinstip_cache{0};
     //! The total number of bytes available for us to use across all leveldb
     //! coins databases. This will be split somehow across chainstates.
     int64_t m_total_coinsdb_cache{0};
 
     //! Instantiate a new chainstate.
     //!
     //! @param[in] mempool              The mempool to pass to the chainstate
     //!                                 constructor.
     Chainstate &InitializeChainstate(CTxMemPool *mempool)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! Get all chainstates currently being used.
     std::vector<Chainstate *> GetAll();
 
     //! Construct and activate a Chainstate on the basis of UTXO snapshot data.
     //!
     //! Steps:
     //!
     //! - Initialize an unused Chainstate.
     //! - Load its `CoinsViews` contents from `coins_file`.
     //! - Verify that the hash of the resulting coinsdb matches the expected
     //!   hash per assumeutxo chain parameters.
     //! - Wait for our headers chain to include the base block of the snapshot.
     //! - "Fast forward" the tip of the new chainstate to the base of the
     //!   snapshot, faking nTx* block index data along the way.
     //! - Move the new chainstate to `m_snapshot_chainstate` and make it our
     //!   ActiveChainstate().
     [[nodiscard]] bool ActivateSnapshot(AutoFile &coins_file,
                                         const node::SnapshotMetadata &metadata,
                                         bool in_memory);
 
     //! Once the background validation chainstate has reached the height which
     //! is the base of the UTXO snapshot in use, compare its coins to ensure
     //! they match those expected by the snapshot.
     //!
     //! If the coins match (expected), then mark the validation chainstate for
     //! deletion and continue using the snapshot chainstate as active.
     //! Otherwise, revert to using the ibd chainstate and shutdown.
     SnapshotCompletionResult MaybeCompleteSnapshotValidation(
         std::function<void(bilingual_str)> shutdown_fnc =
             [](bilingual_str msg) { AbortNode(msg.original, msg); })
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! The most-work chain.
     Chainstate &ActiveChainstate() const;
     CChain &ActiveChain() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) {
         return ActiveChainstate().m_chain;
     }
     int ActiveHeight() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) {
         return ActiveChain().Height();
     }
     CBlockIndex *ActiveTip() const EXCLUSIVE_LOCKS_REQUIRED(GetMutex()) {
         return ActiveChain().Tip();
     }
 
     node::BlockMap &BlockIndex() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
         AssertLockHeld(::cs_main);
         return m_blockman.m_block_index;
     }
 
     //! @returns true if a snapshot-based chainstate is in use. Also implies
     //!          that a background validation chainstate is in use.
     bool IsSnapshotActive() const;
 
     std::optional<BlockHash> SnapshotBlockhash() const;
 
     //! Is there a snapshot in use and has it been fully validated?
     bool IsSnapshotValidated() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
         return m_snapshot_chainstate && m_ibd_chainstate &&
                m_ibd_chainstate->m_disabled;
     }
 
     /**
      * Process an incoming block. This only returns after the best known valid
      * block is made active. Note that it does not, however, guarantee that the
      * specific block passed to it has been checked for validity!
      *
      * If you want to *possibly* get feedback on whether the block is valid,
      * you must install a CValidationInterface (see validationinterface.h) -
      * this will have its BlockChecked method called whenever *any* block
      * completes validation.
      *
      * Note that we guarantee that either the proof-of-work is valid on the
      * block, or (and possibly also) BlockChecked will have been called.
      *
      * May not be called in a validationinterface callback.
      *
      * @param[in]   config            The global config.
      * @param[in]   block             The block we want to process.
      * @param[in]   force_processing  Process this block even if unrequested;
      *                                used for non-network block sources.
      * @param[out]  new_block         A boolean which is set to indicate if
      *                                the block was first received via this
      *                                call.
      * @returns     True if the block was processed, independently of block
      *              validity.
      */
     bool ProcessNewBlock(const Config &config,
                          const std::shared_ptr<const CBlock> &block,
                          bool force_processing, bool *new_block)
         LOCKS_EXCLUDED(cs_main);
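
     // A minimal sketch for submitting a locally constructed or otherwise
     // unrequested block (hypothetical names; assumes a ChainstateManager
     // `chainman`, the Config `config` and a std::shared_ptr<const CBlock>
     // `pblock` are in scope):
     //
     //   bool new_block{false};
     //   if (!chainman.ProcessNewBlock(config, pblock,
     //                                 /*force_processing=*/true, &new_block)) {
     //       // The block was not processed (e.g. a duplicate); this does not
     //       // by itself say anything about the block's validity.
     //   }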
 
     /**
      * Process incoming block headers.
      *
      * May not be called in a validationinterface callback.
      *
      * @param[in]  config        The config.
      * @param[in]  block         The block headers themselves.
      * @param[out] state         This may be set to an Error state if any error
      *                           occurred processing them.
      * @param[out] ppindex       If set, the pointer will be set to point to
      *                           the last new block index object for the given
      *                           headers.
      * @return True if block headers were accepted as valid.
      */
     bool ProcessNewBlockHeaders(const Config &config,
                                 const std::vector<CBlockHeader> &block,
                                 BlockValidationState &state,
                                 const CBlockIndex **ppindex = nullptr)
         LOCKS_EXCLUDED(cs_main);
 
     /**
      * Try to add a transaction to the memory pool.
      *
      * @param[in]  tx              The transaction to submit for mempool
      *                             acceptance.
      * @param[in]  test_accept     When true, run validation checks but don't
      *                             submit to mempool.
      */
     [[nodiscard]] MempoolAcceptResult
     ProcessTransaction(const CTransactionRef &tx, bool test_accept = false)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
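
     // A minimal sketch (hypothetical names; assumes cs_main is held and a
     // CTransactionRef `tx` is in scope). Client code should prefer this
     // entry point over AcceptToMemoryPool() outside of tests:
     //
     //   const MempoolAcceptResult result =
     //       chainman.ProcessTransaction(tx, /*test_accept=*/false);
     //   if (result.m_result_type == MempoolAcceptResult::ResultType::INVALID) {
     //       // result.m_state carries the rejection reason.
     //   }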
 
     //! Load the block tree and coins database from disk, initializing state if
     //! we're running with -reindex
     bool LoadBlockIndex() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     //! Check to see if caches are out of balance and if so, call
     //! ResizeCoinsCaches() as needed.
     void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! When starting up, search the datadir for a chainstate based on a UTXO
     //! snapshot that is in the process of being validated.
     bool DetectSnapshotChainstate(CTxMemPool *mempool)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     void ResetChainstates() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! Switch the active chainstate to one based on a UTXO snapshot that was
     //! loaded previously.
     Chainstate &ActivateExistingSnapshot(CTxMemPool *mempool,
                                          BlockHash base_blockhash)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     //! If we have validated a snapshot chain during this runtime, copy its
     //! chainstate directory over to the main `chainstate` location, completing
     //! validation of the snapshot.
     //!
     //! If the cleanup succeeds, the caller will need to ensure chainstates are
     //! reinitialized, since ResetChainstates() will be called before leveldb
     //! directories are moved or deleted.
     //!
     //! @sa node/chainstate:LoadChainstate()
     bool ValidatedSnapshotCleanup() EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 };
 
 /** Dump the mempool to disk. */
 bool DumpMempool(const CTxMemPool &pool);
 
 /** Load the mempool from disk. */
 bool LoadMempool(const Config &config, CTxMemPool &pool,
                  Chainstate &active_chainstate);
 
 /**
  * Return the expected assumeutxo value for a given height, if one exists.
  *
  * @param[in] height Get the assumeutxo value for this height.
  *
  * @returns empty if no assumeutxo configuration exists for the given height.
  */
 const AssumeutxoData *ExpectedAssumeutxo(const int height,
                                          const CChainParams &params);
 
 #endif // BITCOIN_VALIDATION_H