diff --git a/src/avalanche/processor.cpp b/src/avalanche/processor.cpp
index c871f7eaf..e041e3be7 100644
--- a/src/avalanche/processor.cpp
+++ b/src/avalanche/processor.cpp
@@ -1,518 +1,516 @@
 // Copyright (c) 2018-2019 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <avalanche/processor.h>
 
 #include <avalanche/peermanager.h>
 #include <chain.h>
 #include <netmessagemaker.h>
 #include <reverse_iterator.h>
 #include <scheduler.h>
 #include <util/bitmanip.h>
 #include <validation.h>
 
 #include <chrono>
 #include <tuple>
 
 /**
  * Run the avalanche event loop every 10ms.
  */
 static constexpr std::chrono::milliseconds AVALANCHE_TIME_STEP{10};
 
 // Unfortunately, the bitcoind codebase is full of globals and we are kinda
 // forced into it here.
 std::unique_ptr<avalanche::Processor> g_avalanche;
 
 namespace avalanche {
 
 bool VoteRecord::registerVote(NodeId nodeid, uint32_t error) {
     // We just got a new vote, so there is one less inflight request.
     clearInflightRequest();
 
     // We want to avoid having the same node voting twice in a quorum.
     if (!addNodeToQuorum(nodeid)) {
         return false;
     }
 
     /**
      * The result of the vote is determined from the error code. If the error
      * code is 0, there is no error and therefore the vote is yes. If there is
      * an error, we check the most significant bit to decide if the vote is
      * a no (for instance, the block is invalid) or if the vote is
      * inconclusive (for instance, the queried node does not have the block
      * yet).
      */
     votes = (votes << 1) | (error == 0);
     consider = (consider << 1) | (int32_t(error) >= 0);
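     // Note: int32_t(error) >= 0 is true exactly when the most significant
     // bit of the error code is unset, i.e. when the vote is conclusive.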
 
     /**
      * We compute the number of yes and/or no votes as follows:
      *
      * votes:     1010
      * consider:  1100
      *
      * yes votes: 1000 using votes & consider
      * no votes:  0100 using ~votes & consider
      */
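     /**
      * In the example above there is a single yes vote (1000) and a single
      * no vote (0100). A round is conclusive only when at least 7 of the
      * last 8 polls were considered and agree, i.e. when one of the two
      * counts below is strictly greater than 6.
      */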
     bool yes = countBits(votes & consider & 0xff) > 6;
     if (!yes) {
         bool no = countBits(~votes & consider & 0xff) > 6;
         if (!no) {
             // The round is inconclusive.
             return false;
         }
     }
 
     // If the round is in agreement with previous rounds, increase confidence.
     if (isAccepted() == yes) {
         confidence += 2;
         return getConfidence() == AVALANCHE_FINALIZATION_SCORE;
     }
 
     // The round changed our state. We reset the confidence.
     confidence = yes;
     return true;
 }
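
 /**
  * A worked example of the confidence encoding used above (the LSB stores
  * the current result, the higher bits the score, see processor.h):
  *
  *   confidence = 1;   // accepted, getConfidence() == 0
  *   confidence += 2;  // still accepted, getConfidence() == 1
  *
  * Finalization therefore requires AVALANCHE_FINALIZATION_SCORE (128)
  * conclusive rounds agreeing with the current result since the last state
  * change.
  */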
 
 bool VoteRecord::addNodeToQuorum(NodeId nodeid) {
     if (nodeid == NO_NODE) {
         // Helpful for testing.
         return true;
     }
 
     // MMIX Linear Congruent Generator.
     const uint64_t r1 =
         6364136223846793005 * uint64_t(nodeid) + 1442695040888963407;
     // Fibonacci hashing.
     const uint64_t r2 = 11400714819323198485ull * (nodeid ^ seed);
     // Combine and extract hash.
     const uint16_t h = (r1 + r2) >> 48;
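     // The high bits of a multiplicative hash are the best mixed, so the
     // 16-bit fingerprint is taken from the top of the 64-bit combination.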
 
     /**
      * Check if the node is in the filter.
      */
     for (size_t i = 1; i < nodeFilter.size(); i++) {
         if (nodeFilter[(successfulVotes + i) % nodeFilter.size()] == h) {
             return false;
         }
     }
 
     /**
      * Add the node which just voted to the filter.
      */
     nodeFilter[successfulVotes % nodeFilter.size()] = h;
     successfulVotes++;
     return true;
 }
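
 /**
  * Filter behaviour, for illustration: nodeFilter is a ring of the 8 most
  * recent fingerprints, indexed by successfulVotes. A vote is rejected when
  * its 16-bit fingerprint matches any of the 7 most recently accepted ones
  * (the slot being overwritten holds the oldest entry and is skipped), so a
  * single node cannot dominate a quorum, modulo 16-bit collisions.
  */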
 
 bool VoteRecord::registerPoll() const {
     uint8_t count = inflight.load();
     while (count < AVALANCHE_MAX_INFLIGHT_POLL) {
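         // On failure, compare_exchange_weak reloads count with the current
         // value, so the loop retries until the increment succeeds or the
         // limit is observed.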
         if (inflight.compare_exchange_weak(count, count + 1)) {
             return true;
         }
     }
 
     return false;
 }
 
 static bool IsWorthPolling(const CBlockIndex *pindex) {
     AssertLockHeld(cs_main);
 
     if (pindex->nStatus.isInvalid()) {
         // No point polling invalid blocks.
         return false;
     }
 
     if (IsBlockFinalized(pindex)) {
         // There is no point polling finalized blocks.
         return false;
     }
 
     return true;
 }
 
 Processor::Processor(CConnman *connmanIn)
     : connman(connmanIn), queryTimeoutDuration(AVALANCHE_DEFAULT_QUERY_TIMEOUT),
       round(0), peerManager(std::make_unique<PeerManager>()) {
     // Pick a random key for the session.
     sessionKey.MakeNewKey(true);
 }
 
 Processor::~Processor() {
     stopEventLoop();
 }
 
 bool Processor::addBlockToReconcile(const CBlockIndex *pindex) {
     bool isAccepted;
 
     {
         LOCK(cs_main);
         if (!IsWorthPolling(pindex)) {
             // There is no point polling this block.
             return false;
         }
 
         isAccepted = ::ChainActive().Contains(pindex);
     }
 
     return vote_records.getWriteView()
         ->insert(std::make_pair(pindex, VoteRecord(isAccepted)))
         .second;
 }
 
 bool Processor::isAccepted(const CBlockIndex *pindex) const {
     auto r = vote_records.getReadView();
     auto it = r->find(pindex);
     if (it == r.end()) {
         return false;
     }
 
     return it->second.isAccepted();
 }
 
 int Processor::getConfidence(const CBlockIndex *pindex) const {
     auto r = vote_records.getReadView();
     auto it = r->find(pindex);
     if (it == r.end()) {
         return -1;
     }
 
     return it->second.getConfidence();
 }
 
 namespace {
     /**
      * When using TCP, we need to sign all messages as the transport layer is
      * not secure.
      */
-    class TCPAvalancheResponse {
-        AvalancheResponse response;
+    class TCPResponse {
+        Response response;
         std::array<uint8_t, 64> sig;
 
     public:
-        TCPAvalancheResponse(AvalancheResponse responseIn, const CKey &key)
+        TCPResponse(Response responseIn, const CKey &key)
             : response(std::move(responseIn)) {
             CHashWriter hasher(SER_GETHASH, 0);
             hasher << response;
             const uint256 hash = hasher.GetHash();
 
             // Now let's sign!
             std::vector<uint8_t> vchSig;
             if (key.SignSchnorr(hash, vchSig)) {
                 // Schnorr sigs are 64 bytes in size.
                 assert(vchSig.size() == 64);
                 std::copy(vchSig.begin(), vchSig.end(), sig.begin());
             } else {
                 sig.fill(0);
             }
         }
 
         // serialization support
         ADD_SERIALIZE_METHODS;
 
         template <typename Stream, typename Operation>
         inline void SerializationOp(Stream &s, Operation ser_action) {
             READWRITE(response);
             READWRITE(sig);
         }
     };
 } // namespace
 
-void Processor::sendResponse(CNode *pfrom, AvalancheResponse response) const {
+void Processor::sendResponse(CNode *pfrom, Response response) const {
     connman->PushMessage(
-        pfrom,
-        CNetMsgMaker(pfrom->GetSendVersion())
-            .Make(NetMsgType::AVARESPONSE,
-                  TCPAvalancheResponse(std::move(response), sessionKey)));
+        pfrom, CNetMsgMaker(pfrom->GetSendVersion())
+                   .Make(NetMsgType::AVARESPONSE,
+                         TCPResponse(std::move(response), sessionKey)));
 }
 
-bool Processor::registerVotes(NodeId nodeid, const AvalancheResponse &response,
+bool Processor::registerVotes(NodeId nodeid, const Response &response,
                               std::vector<BlockUpdate> &updates) {
     {
         // Save the time at which we can query again.
         LOCK(cs_peerManager);
 
         // FIXME: This will override the time even when we receive an old,
         // stale message. This should check that the message is indeed the
         // most up-to-date one before updating the time.
         peerManager->updateNextRequestTime(
             nodeid, std::chrono::steady_clock::now() +
                         std::chrono::milliseconds(response.getCooldown()));
     }
 
     std::vector<CInv> invs;
 
     {
         // Check that the query exists.
         auto w = queries.getWriteView();
         auto it = w->find(std::make_tuple(nodeid, response.getRound()));
         if (it == w.end()) {
             // NB: The request may be old, so we don't increase banscore.
             return false;
         }
 
         invs = std::move(it->invs);
         w->erase(it);
     }
 
     // Verify that the request and the vote are consistent.
-    const std::vector<AvalancheVote> &votes = response.GetVotes();
+    const std::vector<Vote> &votes = response.GetVotes();
     size_t size = invs.size();
     if (votes.size() != size) {
         // TODO: increase banscore for inconsistent response.
         // NB: This isn't timeout but actually node misbehaving.
         return false;
     }
 
     for (size_t i = 0; i < size; i++) {
         if (invs[i].hash != votes[i].GetHash()) {
             // TODO: increase banscore for inconsistent response.
             // NB: This isn't timeout but actually node misbehaving.
             return false;
         }
     }
 
-    std::map<CBlockIndex *, AvalancheVote> responseIndex;
+    std::map<CBlockIndex *, Vote> responseIndex;
 
     {
         LOCK(cs_main);
         for (const auto &v : votes) {
             BlockMap::iterator mi = mapBlockIndex.find(BlockHash(v.GetHash()));
             if (mi == mapBlockIndex.end()) {
                 // This should not happen, but just in case...
                 continue;
             }
 
             CBlockIndex *pindex = mi->second;
             if (!IsWorthPolling(pindex)) {
                 // There is no point polling this block.
                 continue;
             }
 
             responseIndex.insert(std::make_pair(pindex, v));
         }
     }
 
     {
         // Register votes.
         auto w = vote_records.getWriteView();
         for (const auto &p : responseIndex) {
             CBlockIndex *pindex = p.first;
-            const AvalancheVote &v = p.second;
+            const Vote &v = p.second;
 
             auto it = w->find(pindex);
             if (it == w.end()) {
                 // We are not voting on that item anymore.
                 continue;
             }
 
             auto &vr = it->second;
             if (!vr.registerVote(nodeid, v.GetError())) {
                 // This vote did not provide any extra information, move on.
                 continue;
             }
 
             if (!vr.hasFinalized()) {
                 // This item has not been finalized, so we have nothing more
                 // to do.
                 updates.emplace_back(
                     pindex, vr.isAccepted() ? BlockUpdate::Status::Accepted
                                             : BlockUpdate::Status::Rejected);
                 continue;
             }
 
             // We just finalized a vote. If it is valid, then let the caller
             // know. Either way, remove the item from the map.
             updates.emplace_back(pindex, vr.isAccepted()
                                              ? BlockUpdate::Status::Finalized
                                              : BlockUpdate::Status::Invalid);
             w->erase(it);
         }
     }
 
     return true;
 }
 
 bool Processor::addPeer(NodeId nodeid, int64_t score, CPubKey pubkey) {
     LOCK(cs_peerManager);
 
     PeerId p = peerManager->addPeer(score);
     bool inserted = peerManager->addNodeToPeer(p, nodeid, std::move(pubkey));
     if (!inserted) {
         peerManager->removePeer(p);
     }
 
     return inserted;
 }
 
 bool Processor::forNode(NodeId nodeid,
                         std::function<bool(const Node &n)> func) const {
     LOCK(cs_peerManager);
     return peerManager->forNode(nodeid, std::move(func));
 }
 
 bool Processor::startEventLoop(CScheduler &scheduler) {
     return eventLoop.startEventLoop(
         scheduler, [this]() { this->runEventLoop(); }, AVALANCHE_TIME_STEP);
 }
 
 bool Processor::stopEventLoop() {
     return eventLoop.stopEventLoop();
 }
 
 std::vector<CInv> Processor::getInvsForNextPoll(bool forPoll) {
     std::vector<CInv> invs;
 
     // First remove all blocks that are not worth polling.
     {
         LOCK(cs_main);
         auto w = vote_records.getWriteView();
         for (auto it = w->begin(); it != w->end();) {
             const CBlockIndex *pindex = it->first;
             if (!IsWorthPolling(pindex)) {
                 w->erase(it++);
             } else {
                 ++it;
             }
         }
     }
 
     auto r = vote_records.getReadView();
     for (const std::pair<const CBlockIndex *const, VoteRecord> &p :
          reverse_iterate(r)) {
         // Check if we can run poll.
         const bool shouldPoll =
             forPoll ? p.second.registerPoll() : p.second.shouldPoll();
         if (!shouldPoll) {
             continue;
         }
 
         // We don't have a decision, we need more votes.
         invs.emplace_back(MSG_BLOCK, p.first->GetBlockHash());
         if (invs.size() >= AVALANCHE_MAX_ELEMENT_POLL) {
             // Make sure we do not produce more invs than specified by the
             // protocol.
             return invs;
         }
     }
 
     return invs;
 }
 
 NodeId Processor::getSuitableNodeToQuery() {
     LOCK(cs_peerManager);
     return peerManager->getSuitableNodeToQuery();
 }
 
 void Processor::clearTimedoutRequests() {
     auto now = std::chrono::steady_clock::now();
     std::map<CInv, uint8_t> timedout_items{};
 
     {
         // Clear expired requests.
         auto w = queries.getWriteView();
         auto it = w->get<query_timeout>().begin();
         while (it != w->get<query_timeout>().end() && it->timeout < now) {
             for (const auto &i : it->invs) {
                 timedout_items[i]++;
             }
 
             w->get<query_timeout>().erase(it++);
         }
     }
 
     if (timedout_items.empty()) {
         return;
     }
 
     // In flight request accounting.
     for (const auto &p : timedout_items) {
         const CInv &inv = p.first;
         assert(inv.type == MSG_BLOCK);
 
         CBlockIndex *pindex;
 
         {
             LOCK(cs_main);
             BlockMap::iterator mi = mapBlockIndex.find(BlockHash(inv.hash));
             if (mi == mapBlockIndex.end()) {
                 continue;
             }
 
             pindex = mi->second;
         }
 
         auto w = vote_records.getWriteView();
         auto it = w->find(pindex);
         if (it == w.end()) {
             continue;
         }
 
         it->second.clearInflightRequest(p.second);
     }
 }
 
 void Processor::runEventLoop() {
     // First things first, check if we have requests that timed out and clear
     // them.
     clearTimedoutRequests();
 
     // Make sure there is at least one suitable node to query before gathering
     // invs.
     NodeId nodeid = getSuitableNodeToQuery();
     if (nodeid == NO_NODE) {
         return;
     }
     std::vector<CInv> invs = getInvsForNextPoll();
     if (invs.empty()) {
         return;
     }
 
     do {
         /**
          * If we lost contact with that node, then we remove it from the set
          * of known nodes, but never add the request to queries, which
          * ensures bad nodes get cleaned up over time.
          */
         bool hasSent = connman->ForNode(nodeid, [this, &invs](CNode *pnode) {
             uint64_t current_round = round++;
 
             {
                 // Compute the time at which this request times out.
                 auto timeout =
                     std::chrono::steady_clock::now() + queryTimeoutDuration;
                 // Register the query.
                 queries.getWriteView()->insert(
                     {pnode->GetId(), current_round, timeout, invs});
                 // Set the timeout.
                 LOCK(cs_peerManager);
                 peerManager->updateNextRequestTime(pnode->GetId(), timeout);
             }
 
             // Send the query to the node.
             connman->PushMessage(
-                pnode,
-                CNetMsgMaker(pnode->GetSendVersion())
-                    .Make(NetMsgType::AVAPOLL,
-                          AvalanchePoll(current_round, std::move(invs))));
+                pnode, CNetMsgMaker(pnode->GetSendVersion())
+                           .Make(NetMsgType::AVAPOLL,
+                                 Poll(current_round, std::move(invs))));
             return true;
         });
 
         // Success!
         if (hasSent) {
             return;
         }
 
         {
             // This node is obsolete, delete it.
             LOCK(cs_peerManager);
             peerManager->removeNode(nodeid);
         }
 
         // Get the next suitable node to try again.
         nodeid = getSuitableNodeToQuery();
     } while (nodeid != NO_NODE);
 }
 
 } // namespace avalanche
diff --git a/src/avalanche/processor.h b/src/avalanche/processor.h
index 36ed7b231..d5426bfd4 100644
--- a/src/avalanche/processor.h
+++ b/src/avalanche/processor.h
@@ -1,284 +1,284 @@
 // Copyright (c) 2018-2019 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_AVALANCHE_PROCESSOR_H
 #define BITCOIN_AVALANCHE_PROCESSOR_H
 
 #include <avalanche/node.h>
 #include <avalanche/protocol.h>
 #include <blockindexworkcomparator.h>
 #include <eventloop.h>
 #include <key.h>
 #include <rwcollection.h>
 
 #include <boost/multi_index/composite_key.hpp>
 #include <boost/multi_index/hashed_index.hpp>
 #include <boost/multi_index/member.hpp>
 #include <boost/multi_index/ordered_index.hpp>
 #include <boost/multi_index_container.hpp>
 
 #include <atomic>
 #include <chrono>
 #include <cstdint>
 #include <vector>
 
 class Config;
 class CBlockIndex;
 class CScheduler;
 
 /**
  * Whether avalanche is enabled by default.
  */
 static constexpr bool AVALANCHE_DEFAULT_ENABLED = false;
 
 /**
  * Finalization score.
  */
 static constexpr int AVALANCHE_FINALIZATION_SCORE = 128;
 
 /**
  * Maximum number of items that can be polled at once.
  */
 static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL = 16;
 /**
  * Avalanche default cooldown in milliseconds.
  */
 static constexpr size_t AVALANCHE_DEFAULT_COOLDOWN = 100;
 /**
  * How long before we consider that a query timed out.
  */
 static constexpr std::chrono::milliseconds AVALANCHE_DEFAULT_QUERY_TIMEOUT{
     10000};
 
 /**
  * How many inflight requests can exist for one item.
  */
 static constexpr int AVALANCHE_MAX_INFLIGHT_POLL = 10;
 
 namespace avalanche {
 
 class PeerManager;
 
 /**
  * Vote history.
  */
 struct VoteRecord {
 private:
     // confidence's LSB is the result. Higher bits are the actual confidence
     // score.
     uint16_t confidence = 0;
 
     // Historical record of votes.
     uint8_t votes = 0;
     // Each bit indicates whether the vote is to be considered.
     uint8_t consider = 0;
     // How many in flight requests exist for this element.
     mutable std::atomic<uint8_t> inflight{0};
 
     // Seed for pseudorandom operations.
     const uint32_t seed = 0;
 
     // Track how many successful votes occurred.
     uint32_t successfulVotes = 0;
 
     // Track the nodes which are part of the quorum.
     std::array<uint16_t, 8> nodeFilter{{0, 0, 0, 0, 0, 0, 0, 0}};
 
 public:
     explicit VoteRecord(bool accepted) : confidence(accepted) {}
 
     /**
      * Copy semantics.
      */
     VoteRecord(const VoteRecord &other)
         : confidence(other.confidence), votes(other.votes),
           consider(other.consider), inflight(other.inflight.load()),
           successfulVotes(other.successfulVotes), nodeFilter(other.nodeFilter) {
     }
 
     /**
      * Vote accounting facilities.
      */
     bool isAccepted() const { return confidence & 0x01; }
 
     uint16_t getConfidence() const { return confidence >> 1; }
     bool hasFinalized() const {
         return getConfidence() >= AVALANCHE_FINALIZATION_SCORE;
     }
 
     /**
      * Register a new vote for an item and update confidence accordingly.
      * Returns true if the acceptance or finalization state changed.
      */
     bool registerVote(NodeId nodeid, uint32_t error);
 
     /**
      * Register that a request is being made regarding that item.
      * The method is made const so that it can be accessed via a read-only
      * view of vote_records. This is not a problem as it is thread safe.
      */
     bool registerPoll() const;
 
     /**
      * Return whether this item can be polled at the moment.
      */
     bool shouldPoll() const { return inflight < AVALANCHE_MAX_INFLIGHT_POLL; }
 
     /**
      * Clear `count` inflight requests.
      */
     void clearInflightRequest(uint8_t count = 1) { inflight -= count; }
 
 private:
     /**
      * Add the node to the quorum.
      * Returns true if the node was added, false if the node already was in the
      * quorum.
      */
     bool addNodeToQuorum(NodeId nodeid);
 };
 
 class BlockUpdate {
     union {
         CBlockIndex *pindex;
         uintptr_t raw;
     };
 
     static const size_t STATUS_BITS = 2;
     static const uintptr_t MASK = (1 << STATUS_BITS) - 1;
 
     static_assert(
         alignof(CBlockIndex) >= (1 << STATUS_BITS),
         "CBlockIndex alignement doesn't allow for Status to be stored.");
 
 public:
     enum Status : uint8_t {
         Invalid,
         Rejected,
         Accepted,
         Finalized,
     };
 
     BlockUpdate(CBlockIndex *pindexIn, Status statusIn) : pindex(pindexIn) {
         raw |= statusIn;
     }
 
     Status getStatus() const { return Status(raw & MASK); }
 
     CBlockIndex *getBlockIndex() {
         return reinterpret_cast<CBlockIndex *>(raw & ~MASK);
     }
 
     const CBlockIndex *getBlockIndex() const {
         return const_cast<BlockUpdate *>(this)->getBlockIndex();
     }
 };
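
 /**
  * Pointer-tagging sketch: since alignof(CBlockIndex) is at least 4, the two
  * low bits of a CBlockIndex pointer are always zero and can carry the
  * Status:
  *
  *   BlockUpdate u(pindex, BlockUpdate::Status::Accepted);
  *   u.getStatus();      // Status(raw & MASK) == Accepted
  *   u.getBlockIndex();  // reinterpret_cast<CBlockIndex *>(raw & ~MASK)
  */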
 
 using BlockVoteMap =
     std::map<const CBlockIndex *, VoteRecord, CBlockIndexWorkComparator>;
 
 struct query_timeout {};
 
 namespace {
     struct AvalancheTest;
 }
 
 class Processor {
     CConnman *connman;
     std::chrono::milliseconds queryTimeoutDuration;
 
     /**
      * Blocks to run avalanche on.
      */
     RWCollection<BlockVoteMap> vote_records;
 
     /**
      * Keep track of peers and queries sent.
      */
     std::atomic<uint64_t> round;
 
     /**
      * Keep track of the peers and associated info.
      */
     mutable Mutex cs_peerManager;
     std::unique_ptr<PeerManager> peerManager GUARDED_BY(cs_peerManager);
 
     struct Query {
         NodeId nodeid;
         uint64_t round;
         TimePoint timeout;
 
         /**
          * We declare this as mutable so it can be modified in the multi_index.
          * This is ok because we do not use this field to index in any way.
          *
          * /!\ Do not use any mutable field as index.
          */
         mutable std::vector<CInv> invs;
     };
 
     using QuerySet = boost::multi_index_container<
         Query,
         boost::multi_index::indexed_by<
             // index by nodeid/round
             boost::multi_index::hashed_unique<boost::multi_index::composite_key<
                 Query,
                 boost::multi_index::member<Query, NodeId, &Query::nodeid>,
                 boost::multi_index::member<Query, uint64_t, &Query::round>>>,
             // sorted by timeout
             boost::multi_index::ordered_non_unique<
                 boost::multi_index::tag<query_timeout>,
                 boost::multi_index::member<Query, TimePoint,
                                            &Query::timeout>>>>;
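
     /**
      * The two indices above match the two access patterns in the processor:
      * registerVotes() looks a query up by (nodeid, round) through the
      * hashed index, while clearTimedoutRequests() walks the ordered
      * query_timeout index to expire the oldest queries first.
      */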
 
     RWCollection<QuerySet> queries;
 
     /** The key used to sign responses. */
     CKey sessionKey;
 
     /** Event loop machinery. */
     EventLoop eventLoop;
 
 public:
     explicit Processor(CConnman *connmanIn);
     ~Processor();
 
     void setQueryTimeoutDuration(std::chrono::milliseconds d) {
         queryTimeoutDuration = d;
     }
 
     bool addBlockToReconcile(const CBlockIndex *pindex);
     bool isAccepted(const CBlockIndex *pindex) const;
     int getConfidence(const CBlockIndex *pindex) const;
 
     // TODO: Refactor the API to remove the dependency on avalanche/protocol.h
-    void sendResponse(CNode *pfrom, AvalancheResponse response) const;
-    bool registerVotes(NodeId nodeid, const AvalancheResponse &response,
+    void sendResponse(CNode *pfrom, Response response) const;
+    bool registerVotes(NodeId nodeid, const Response &response,
                        std::vector<BlockUpdate> &updates);
 
     bool addPeer(NodeId nodeid, int64_t score, CPubKey pubkey);
     bool forNode(NodeId nodeid, std::function<bool(const Node &n)> func) const;
 
     CPubKey getSessionPubKey() const { return sessionKey.GetPubKey(); }
 
     bool startEventLoop(CScheduler &scheduler);
     bool stopEventLoop();
 
 private:
     void runEventLoop();
     void clearTimedoutRequests();
     std::vector<CInv> getInvsForNextPoll(bool forPoll = true);
     NodeId getSuitableNodeToQuery();
 
     friend struct ::avalanche::AvalancheTest;
 };
 
 } // namespace avalanche
 
 /**
  * Global avalanche instance.
  */
 extern std::unique_ptr<avalanche::Processor> g_avalanche;
 
 #endif // BITCOIN_AVALANCHE_PROCESSOR_H
diff --git a/src/avalanche/protocol.h b/src/avalanche/protocol.h
index b5eae7155..fe6214973 100644
--- a/src/avalanche/protocol.h
+++ b/src/avalanche/protocol.h
@@ -1,83 +1,85 @@
 // Copyright (c) 2020 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_AVALANCHE_PROTOCOL_H
 #define BITCOIN_AVALANCHE_PROTOCOL_H
 
 #include <protocol.h> // for CInv
 #include <serialize.h>
 #include <uint256.h>
 
 #include <cstdint>
 #include <vector>
 
-class AvalancheVote {
+namespace avalanche {
+
+class Vote {
     uint32_t error;
     uint256 hash;
 
 public:
-    AvalancheVote() : error(-1), hash() {}
-    AvalancheVote(uint32_t errorIn, uint256 hashIn)
-        : error(errorIn), hash(hashIn) {}
+    Vote() : error(-1), hash() {}
+    Vote(uint32_t errorIn, uint256 hashIn) : error(errorIn), hash(hashIn) {}
 
     const uint256 &GetHash() const { return hash; }
     uint32_t GetError() const { return error; }
 
     // serialization support
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         READWRITE(error);
         READWRITE(hash);
     }
 };
 
-class AvalancheResponse {
+class Response {
     uint64_t round;
     uint32_t cooldown;
-    std::vector<AvalancheVote> votes;
+    std::vector<Vote> votes;
 
 public:
-    AvalancheResponse() : round(-1), cooldown(-1) {}
-    AvalancheResponse(uint64_t roundIn, uint32_t cooldownIn,
-                      std::vector<AvalancheVote> votesIn)
+    Response() : round(-1), cooldown(-1) {}
+    Response(uint64_t roundIn, uint32_t cooldownIn, std::vector<Vote> votesIn)
         : round(roundIn), cooldown(cooldownIn), votes(votesIn) {}
 
     uint64_t getRound() const { return round; }
     uint32_t getCooldown() const { return cooldown; }
-    const std::vector<AvalancheVote> &GetVotes() const { return votes; }
+    const std::vector<Vote> &GetVotes() const { return votes; }
 
     // serialization support
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         READWRITE(round);
         READWRITE(cooldown);
         READWRITE(votes);
     }
 };
 
-class AvalanchePoll {
+class Poll {
     uint64_t round;
     std::vector<CInv> invs;
 
 public:
-    AvalanchePoll(uint64_t roundIn, std::vector<CInv> invsIn)
+    Poll(uint64_t roundIn, std::vector<CInv> invsIn)
         : round(roundIn), invs(invsIn) {}
 
     const std::vector<CInv> &GetInvs() const { return invs; }
 
     // serialization support
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         READWRITE(round);
         READWRITE(invs);
     }
 };
 
+} // namespace avalanche
+
 #endif // BITCOIN_AVALANCHE_PROTOCOL_H
diff --git a/src/avalanche/test/processor_tests.cpp b/src/avalanche/test/processor_tests.cpp
index e261c1490..7e3e149d5 100644
--- a/src/avalanche/test/processor_tests.cpp
+++ b/src/avalanche/test/processor_tests.cpp
@@ -1,976 +1,969 @@
 // Copyright (c) 2018-2020 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <avalanche/processor.h>
 
 #include <avalanche/peermanager.h>
 #include <config.h>
 #include <net_processing.h> // For PeerLogicValidation
 #include <util/time.h>
 
 #include <test/util/setup_common.h>
 
 #include <boost/test/unit_test.hpp>
 
 using namespace avalanche;
 
 namespace avalanche {
 namespace {
     struct AvalancheTest {
         static void runEventLoop(avalanche::Processor &p) { p.runEventLoop(); }
 
         static std::vector<CInv> getInvsForNextPoll(Processor &p) {
             return p.getInvsForNextPoll(false);
         }
 
         static NodeId getSuitableNodeToQuery(Processor &p) {
             return p.getSuitableNodeToQuery();
         }
 
         static PeerManager &getPeerManager(Processor &p) {
             LOCK(p.cs_peerManager);
             return *p.peerManager;
         }
 
         static uint64_t getRound(const Processor &p) { return p.round; }
     };
 } // namespace
 } // namespace avalanche
 
 struct CConnmanTest : public CConnman {
     using CConnman::CConnman;
     void AddNode(CNode &node) {
         LOCK(cs_vNodes);
         vNodes.push_back(&node);
     }
     void ClearNodes() {
         LOCK(cs_vNodes);
         for (CNode *node : vNodes) {
             delete node;
         }
         vNodes.clear();
     }
 };
 
 BOOST_FIXTURE_TEST_SUITE(processor_tests, TestChain100Setup)
 
 #define REGISTER_VOTE_AND_CHECK(vr, vote, state, finalized, confidence)        \
     vr.registerVote(NO_NODE, vote);                                            \
     BOOST_CHECK_EQUAL(vr.isAccepted(), state);                                 \
     BOOST_CHECK_EQUAL(vr.hasFinalized(), finalized);                           \
     BOOST_CHECK_EQUAL(vr.getConfidence(), confidence);
 
 BOOST_AUTO_TEST_CASE(vote_record) {
     VoteRecord vraccepted(true);
 
     // Check initial state.
     BOOST_CHECK_EQUAL(vraccepted.isAccepted(), true);
     BOOST_CHECK_EQUAL(vraccepted.hasFinalized(), false);
     BOOST_CHECK_EQUAL(vraccepted.getConfidence(), 0);
 
     VoteRecord vr(false);
 
     // Check initial state.
     BOOST_CHECK_EQUAL(vr.isAccepted(), false);
     BOOST_CHECK_EQUAL(vr.hasFinalized(), false);
     BOOST_CHECK_EQUAL(vr.getConfidence(), 0);
 
     // We need to register 6 positive votes before we start counting.
     for (int i = 0; i < 6; i++) {
         REGISTER_VOTE_AND_CHECK(vr, 0, false, false, 0);
     }
 
     // Next vote will flip state, and confidence will increase as long as we
     // vote yes.
     REGISTER_VOTE_AND_CHECK(vr, 0, true, false, 0);
 
     // A single neutral vote does not change anything.
     REGISTER_VOTE_AND_CHECK(vr, -1, true, false, 1);
     for (int i = 2; i < 8; i++) {
         REGISTER_VOTE_AND_CHECK(vr, 0, true, false, i);
     }
 
     // Two neutral votes will stall progress.
     REGISTER_VOTE_AND_CHECK(vr, -1, true, false, 7);
     REGISTER_VOTE_AND_CHECK(vr, -1, true, false, 7);
     for (int i = 2; i < 8; i++) {
         REGISTER_VOTE_AND_CHECK(vr, 0, true, false, 7);
     }
 
     // Now confidence will increase as long as we vote yes.
     for (int i = 8; i < AVALANCHE_FINALIZATION_SCORE; i++) {
         REGISTER_VOTE_AND_CHECK(vr, 0, true, false, i);
     }
 
     // The next vote will finalize the decision.
     REGISTER_VOTE_AND_CHECK(vr, 1, true, true, AVALANCHE_FINALIZATION_SCORE);
 
     // Now that we have two no votes, confidence stops increasing.
     for (int i = 0; i < 5; i++) {
         REGISTER_VOTE_AND_CHECK(vr, 1, true, true,
                                 AVALANCHE_FINALIZATION_SCORE);
     }
 
     // Next vote will flip state, and confidence will increase as long as we
     // vote no.
     REGISTER_VOTE_AND_CHECK(vr, 1, false, false, 0);
 
     // A single neutral vote does not change anything.
     REGISTER_VOTE_AND_CHECK(vr, -1, false, false, 1);
     for (int i = 2; i < 8; i++) {
         REGISTER_VOTE_AND_CHECK(vr, 1, false, false, i);
     }
 
     // Two neutral votes will stall progress.
     REGISTER_VOTE_AND_CHECK(vr, -1, false, false, 7);
     REGISTER_VOTE_AND_CHECK(vr, -1, false, false, 7);
     for (int i = 2; i < 8; i++) {
         REGISTER_VOTE_AND_CHECK(vr, 1, false, false, 7);
     }
 
     // Now confidence will increase as long as we vote no.
     for (int i = 8; i < AVALANCHE_FINALIZATION_SCORE; i++) {
         REGISTER_VOTE_AND_CHECK(vr, 1, false, false, i);
     }
 
     // The next vote will finalize the decision.
     REGISTER_VOTE_AND_CHECK(vr, 0, false, true, AVALANCHE_FINALIZATION_SCORE);
 
     // Check that inflight accounting work as expected.
     VoteRecord vrinflight(false);
     for (int i = 0; i < 2 * AVALANCHE_MAX_INFLIGHT_POLL; i++) {
         bool shouldPoll = vrinflight.shouldPoll();
         BOOST_CHECK_EQUAL(shouldPoll, i < AVALANCHE_MAX_INFLIGHT_POLL);
         BOOST_CHECK_EQUAL(vrinflight.registerPoll(), shouldPoll);
     }
 
     // Clear various numbers of inflight requests and check everything
     // behaves as expected.
     for (int i = 1; i < AVALANCHE_MAX_INFLIGHT_POLL; i++) {
         vrinflight.clearInflightRequest(i);
         BOOST_CHECK(vrinflight.shouldPoll());
 
         for (int j = 1; j < i; j++) {
             BOOST_CHECK(vrinflight.registerPoll());
             BOOST_CHECK(vrinflight.shouldPoll());
         }
 
         BOOST_CHECK(vrinflight.registerPoll());
         BOOST_CHECK(!vrinflight.shouldPoll());
     }
 }
 
 BOOST_AUTO_TEST_CASE(block_update) {
     CBlockIndex index;
     CBlockIndex *pindex = &index;
 
     std::set<BlockUpdate::Status> status{
         BlockUpdate::Status::Invalid,
         BlockUpdate::Status::Rejected,
         BlockUpdate::Status::Accepted,
         BlockUpdate::Status::Finalized,
     };
 
     for (auto s : status) {
         BlockUpdate abu(pindex, s);
         BOOST_CHECK(abu.getBlockIndex() == pindex);
         BOOST_CHECK_EQUAL(abu.getStatus(), s);
     }
 }
 
 CService ip(uint32_t i) {
     struct in_addr s;
     s.s_addr = i;
     return CService(CNetAddr(s), Params().GetDefaultPort());
 }
 
 CNode *ConnectNode(const Config &config, ServiceFlags nServices,
                    PeerLogicValidation &peerLogic, CConnmanTest *connman) {
     static NodeId id = 0;
 
     CAddress addr(ip(GetRandInt(0xffffffff)), NODE_NONE);
     auto node = new CNode(id++, ServiceFlags(NODE_NETWORK), 0, INVALID_SOCKET,
                           addr, 0, 0, CAddress(), "",
                           /*fInboundIn=*/false);
     node->SetSendVersion(PROTOCOL_VERSION);
     node->nServices = nServices;
     peerLogic.InitializeNode(config, node);
     node->nVersion = 1;
     node->fSuccessfullyConnected = true;
 
     connman->AddNode(*node);
     return node;
 }
 
 std::array<CNode *, 8> ConnectNodes(const Config &config, Processor &p,
                                     ServiceFlags nServices,
                                     PeerLogicValidation &peerLogic,
                                     CConnmanTest *connman) {
     PeerManager &pm = AvalancheTest::getPeerManager(p);
     PeerId pid = pm.addPeer(100);
 
     std::array<CNode *, 8> nodes;
     for (CNode *&n : nodes) {
         n = ConnectNode(config, nServices, peerLogic, connman);
         BOOST_CHECK(pm.addNodeToPeer(pid, n->GetId(), CPubKey()));
     }
 
     return nodes;
 }
 
-static AvalancheResponse next(AvalancheResponse &r) {
+static Response next(Response &r) {
     auto copy = r;
     r = {r.getRound() + 1, r.getCooldown(), r.GetVotes()};
     return copy;
 }
 
 BOOST_AUTO_TEST_CASE(block_register) {
     const Config &config = GetConfig();
 
     auto connman = std::make_unique<CConnmanTest>(config, 0x1337, 0x1337);
     auto peerLogic = std::make_unique<PeerLogicValidation>(
         connman.get(), nullptr, *m_node.scheduler, false);
 
     Processor p(connman.get());
     std::vector<BlockUpdate> updates;
 
     CBlock block = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHash = block.GetHash();
     const CBlockIndex *pindex;
     {
         LOCK(cs_main);
         pindex = LookupBlockIndex(blockHash);
     }
 
     // Create nodes that support avalanche.
     auto avanodes =
         ConnectNodes(config, p, NODE_AVALANCHE, *peerLogic, connman.get());
 
     // Querying for a random block returns false.
     BOOST_CHECK(!p.isAccepted(pindex));
 
     // Add a new block. Check it is added to the polls.
     BOOST_CHECK(p.addBlockToReconcile(pindex));
     auto invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK(invs[0].hash == blockHash);
 
     // A newly added block's state reflects the blockchain.
     BOOST_CHECK(p.isAccepted(pindex));
 
     int nextNodeIndex = 0;
-    auto registerNewVote = [&](const AvalancheResponse &resp) {
+    auto registerNewVote = [&](const Response &resp) {
         AvalancheTest::runEventLoop(p);
         auto nodeid = avanodes[nextNodeIndex++ % avanodes.size()]->GetId();
         BOOST_CHECK(p.registerVotes(nodeid, resp, updates));
     };
 
     // Let's vote for this block a few times.
-    AvalancheResponse resp{0, 0, {AvalancheVote(0, blockHash)}};
+    Response resp{0, 0, {Vote(0, blockHash)}};
     for (int i = 0; i < 6; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(p.isAccepted(pindex));
         BOOST_CHECK_EQUAL(p.getConfidence(pindex), 0);
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // A single neutral vote does not change anything.
-    resp = {AvalancheTest::getRound(p), 0, {AvalancheVote(-1, blockHash)}};
+    resp = {AvalancheTest::getRound(p), 0, {Vote(-1, blockHash)}};
     registerNewVote(next(resp));
     BOOST_CHECK(p.isAccepted(pindex));
     BOOST_CHECK_EQUAL(p.getConfidence(pindex), 0);
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
-    resp = {AvalancheTest::getRound(p), 0, {AvalancheVote(0, blockHash)}};
+    resp = {AvalancheTest::getRound(p), 0, {Vote(0, blockHash)}};
     for (int i = 1; i < 7; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(p.isAccepted(pindex));
         BOOST_CHECK_EQUAL(p.getConfidence(pindex), i);
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // Two neutral votes will stall progress.
-    resp = {AvalancheTest::getRound(p), 0, {AvalancheVote(-1, blockHash)}};
+    resp = {AvalancheTest::getRound(p), 0, {Vote(-1, blockHash)}};
     registerNewVote(next(resp));
     BOOST_CHECK(p.isAccepted(pindex));
     BOOST_CHECK_EQUAL(p.getConfidence(pindex), 6);
     BOOST_CHECK_EQUAL(updates.size(), 0);
     registerNewVote(next(resp));
     BOOST_CHECK(p.isAccepted(pindex));
     BOOST_CHECK_EQUAL(p.getConfidence(pindex), 6);
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
-    resp = {AvalancheTest::getRound(p), 0, {AvalancheVote(0, blockHash)}};
+    resp = {AvalancheTest::getRound(p), 0, {Vote(0, blockHash)}};
     for (int i = 2; i < 8; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(p.isAccepted(pindex));
         BOOST_CHECK_EQUAL(p.getConfidence(pindex), 6);
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // We vote for it numerous times to finalize it.
     for (int i = 7; i < AVALANCHE_FINALIZATION_SCORE; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(p.isAccepted(pindex));
         BOOST_CHECK_EQUAL(p.getConfidence(pindex), i);
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // As long as it is not finalized, we poll.
     invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK(invs[0].hash == blockHash);
 
     // Now finalize the decision.
     registerNewVote(next(resp));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(updates[0].getBlockIndex() == pindex);
     BOOST_CHECK_EQUAL(updates[0].getStatus(), BlockUpdate::Status::Finalized);
     updates = {};
 
     // Once the decision is finalized, there is no poll for it.
     invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 0);
 
     // Now let's undo this and finalize rejection.
     BOOST_CHECK(p.addBlockToReconcile(pindex));
     invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK(invs[0].hash == blockHash);
 
-    resp = {AvalancheTest::getRound(p), 0, {AvalancheVote(1, blockHash)}};
+    resp = {AvalancheTest::getRound(p), 0, {Vote(1, blockHash)}};
     for (int i = 0; i < 6; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(p.isAccepted(pindex));
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // Now the state will flip.
     registerNewVote(next(resp));
     BOOST_CHECK(!p.isAccepted(pindex));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(updates[0].getBlockIndex() == pindex);
     BOOST_CHECK_EQUAL(updates[0].getStatus(), BlockUpdate::Status::Rejected);
     updates = {};
 
     // Now it is rejected, but we can vote for it numerous times.
     for (int i = 1; i < AVALANCHE_FINALIZATION_SCORE; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(!p.isAccepted(pindex));
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // As long as it is not finalized, we poll.
     invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK(invs[0].hash == blockHash);
 
     // Now finalize the decision.
     registerNewVote(next(resp));
     BOOST_CHECK(!p.isAccepted(pindex));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(updates[0].getBlockIndex() == pindex);
     BOOST_CHECK_EQUAL(updates[0].getStatus(), BlockUpdate::Status::Invalid);
     updates = {};
 
     // Once the decision is finalized, there is no poll for it.
     invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 0);
 
     // Adding the block twice does nothing.
     BOOST_CHECK(p.addBlockToReconcile(pindex));
     BOOST_CHECK(!p.addBlockToReconcile(pindex));
     BOOST_CHECK(p.isAccepted(pindex));
 
     connman->ClearNodes();
 }
 
 BOOST_AUTO_TEST_CASE(multi_block_register) {
     const Config &config = GetConfig();
 
     auto connman = std::make_unique<CConnmanTest>(config, 0x1337, 0x1337);
     auto peerLogic = std::make_unique<PeerLogicValidation>(
         connman.get(), nullptr, *m_node.scheduler, false);
 
     Processor p(connman.get());
     CBlockIndex indexA, indexB;
 
     std::vector<BlockUpdate> updates;
 
     // Create several nodes that support avalanche.
     auto avanodes =
         ConnectNodes(config, p, NODE_AVALANCHE, *peerLogic, connman.get());
 
     // Make sure the block has a hash.
     CBlock blockA = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHashA = blockA.GetHash();
 
     CBlock blockB = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHashB = blockB.GetHash();
     const CBlockIndex *pindexA;
     const CBlockIndex *pindexB;
     {
         LOCK(cs_main);
         pindexA = LookupBlockIndex(blockHashA);
         pindexB = LookupBlockIndex(blockHashB);
     }
 
     // Querying for a random block returns false.
     BOOST_CHECK(!p.isAccepted(pindexA));
     BOOST_CHECK(!p.isAccepted(pindexB));
 
     // Start voting on block A.
     BOOST_CHECK(p.addBlockToReconcile(pindexA));
     auto invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK(invs[0].hash == blockHashA);
 
     uint64_t round = AvalancheTest::getRound(p);
     AvalancheTest::runEventLoop(p);
     BOOST_CHECK(p.registerVotes(avanodes[0]->GetId(),
-                                {round, 0, {AvalancheVote(0, blockHashA)}},
-                                updates));
+                                {round, 0, {Vote(0, blockHashA)}}, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     // Start voting on block B after one vote.
-    AvalancheResponse resp{
-        round + 1,
-        0,
-        {AvalancheVote(0, blockHashB), AvalancheVote(0, blockHashA)}};
+    Response resp{round + 1, 0, {Vote(0, blockHashB), Vote(0, blockHashA)}};
     BOOST_CHECK(p.addBlockToReconcile(pindexB));
     invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 2);
 
     // Ensure B comes before A because it has accumulated more PoW.
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK(invs[0].hash == blockHashB);
     BOOST_CHECK_EQUAL(invs[1].type, MSG_BLOCK);
     BOOST_CHECK(invs[1].hash == blockHashA);
 
     // Let's vote for these blocks a few times.
     for (int i = 0; i < 4; i++) {
         NodeId nodeid = AvalancheTest::getSuitableNodeToQuery(p);
         AvalancheTest::runEventLoop(p);
         BOOST_CHECK(p.registerVotes(nodeid, next(resp), updates));
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // Now it is accepted, but we can vote for it numerous times.
     for (int i = 0; i < AVALANCHE_FINALIZATION_SCORE; i++) {
         NodeId nodeid = AvalancheTest::getSuitableNodeToQuery(p);
         AvalancheTest::runEventLoop(p);
         BOOST_CHECK(p.registerVotes(nodeid, next(resp), updates));
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // Run two iterations of the event loop so that a vote gets triggered on
     // A and B.
     NodeId firstNodeid = AvalancheTest::getSuitableNodeToQuery(p);
     AvalancheTest::runEventLoop(p);
     NodeId secondNodeid = AvalancheTest::getSuitableNodeToQuery(p);
     AvalancheTest::runEventLoop(p);
 
     BOOST_CHECK(firstNodeid != secondNodeid);
 
     // Next vote will finalize block A.
     BOOST_CHECK(p.registerVotes(firstNodeid, next(resp), updates));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(updates[0].getBlockIndex() == pindexA);
     BOOST_CHECK_EQUAL(updates[0].getStatus(), BlockUpdate::Status::Finalized);
     updates = {};
 
     // We do not vote on A anymore.
     invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK(invs[0].hash == blockHashB);
 
     // Next vote will finalize block B.
     BOOST_CHECK(p.registerVotes(secondNodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(updates[0].getBlockIndex() == pindexB);
     BOOST_CHECK_EQUAL(updates[0].getStatus(), BlockUpdate::Status::Finalized);
     updates = {};
 
     // There is nothing left to vote on.
     invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 0);
 
     connman->ClearNodes();
 }
 
 BOOST_AUTO_TEST_CASE(poll_and_response) {
     const Config &config = GetConfig();
 
     auto connman = std::make_unique<CConnmanTest>(config, 0x1337, 0x1337);
     auto peerLogic = std::make_unique<PeerLogicValidation>(
         connman.get(), nullptr, *m_node.scheduler, false);
 
     Processor p(connman.get());
 
     std::vector<BlockUpdate> updates;
 
     CBlock block = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHash = block.GetHash();
     const CBlockIndex *pindex;
     {
         LOCK(cs_main);
         pindex = LookupBlockIndex(blockHash);
     }
 
     // There is no node to query.
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), NO_NODE);
 
     // Create a node that supports avalanche and one that doesn't.
     ConnectNode(config, NODE_NONE, *peerLogic, connman.get());
     auto avanode =
         ConnectNode(config, NODE_AVALANCHE, *peerLogic, connman.get());
     NodeId avanodeid = avanode->GetId();
     BOOST_CHECK(p.addPeer(avanodeid, 100, CPubKey()));
 
     // It returns the avalanche peer.
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), avanodeid);
 
     // Register a block and check it is added to the list of elements to poll.
     BOOST_CHECK(p.addBlockToReconcile(pindex));
     auto invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK(invs[0].hash == blockHash);
 
     // Trigger a poll on avanode.
     uint64_t round = AvalancheTest::getRound(p);
     AvalancheTest::runEventLoop(p);
 
     // There is no more suitable peer available, so return nothing.
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), NO_NODE);
 
     // Respond to the request.
-    AvalancheResponse resp = {round, 0, {AvalancheVote(0, blockHash)}};
+    Response resp = {round, 0, {Vote(0, blockHash)}};
     BOOST_CHECK(p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     // Now that avanode fulfilled its request, it is added back to the list
     // of queryable nodes.
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), avanodeid);
 
     // Sending a response when not polled fails.
     BOOST_CHECK(!p.registerVotes(avanodeid, next(resp), updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     // Trigger a poll on avanode.
     round = AvalancheTest::getRound(p);
     AvalancheTest::runEventLoop(p);
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), NO_NODE);
 
     // Sending responses that do not match the request also fails.
     // 1. Too many results.
-    resp = {
-        round, 0, {AvalancheVote(0, blockHash), AvalancheVote(0, blockHash)}};
+    resp = {round, 0, {Vote(0, blockHash), Vote(0, blockHash)}};
     AvalancheTest::runEventLoop(p);
     BOOST_CHECK(!p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), avanodeid);
 
     // 2. Not enough results.
     resp = {AvalancheTest::getRound(p), 0, {}};
     AvalancheTest::runEventLoop(p);
     BOOST_CHECK(!p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), avanodeid);
 
     // 3. Do not match the poll.
-    resp = {AvalancheTest::getRound(p), 0, {AvalancheVote()}};
+    resp = {AvalancheTest::getRound(p), 0, {Vote()}};
     AvalancheTest::runEventLoop(p);
     BOOST_CHECK(!p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), avanodeid);
 
     // 4. Invalid round count. Request is not discarded.
     uint64_t queryRound = AvalancheTest::getRound(p);
     AvalancheTest::runEventLoop(p);
 
-    resp = {queryRound + 1, 0, {AvalancheVote()}};
+    resp = {queryRound + 1, 0, {Vote()}};
     BOOST_CHECK(!p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
-    resp = {queryRound - 1, 0, {AvalancheVote()}};
+    resp = {queryRound - 1, 0, {Vote()}};
     BOOST_CHECK(!p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     // 5. Making requests for invalid nodes does not work. The request is
     // not discarded.
-    resp = {queryRound, 0, {AvalancheVote(0, blockHash)}};
+    resp = {queryRound, 0, {Vote(0, blockHash)}};
     BOOST_CHECK(!p.registerVotes(avanodeid + 1234, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     // Proper response gets processed and avanode is available again.
-    resp = {queryRound, 0, {AvalancheVote(0, blockHash)}};
+    resp = {queryRound, 0, {Vote(0, blockHash)}};
     BOOST_CHECK(p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), avanodeid);
 
     // Out-of-order responses are rejected.
     CBlock block2 = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHash2 = block2.GetHash();
     CBlockIndex *pindex2;
     {
         LOCK(cs_main);
         pindex2 = LookupBlockIndex(blockHash2);
     }
     BOOST_CHECK(p.addBlockToReconcile(pindex2));
 
     resp = {AvalancheTest::getRound(p),
             0,
-            {AvalancheVote(0, blockHash), AvalancheVote(0, blockHash2)}};
+            {Vote(0, blockHash), Vote(0, blockHash2)}};
     AvalancheTest::runEventLoop(p);
     BOOST_CHECK(!p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), avanodeid);
 
     // But they are accepted in order.
     resp = {AvalancheTest::getRound(p),
             0,
-            {AvalancheVote(0, blockHash2), AvalancheVote(0, blockHash)}};
+            {Vote(0, blockHash2), Vote(0, blockHash)}};
     AvalancheTest::runEventLoop(p);
     BOOST_CHECK(p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), avanodeid);
 
     // When a block is marked invalid, stop polling.
     pindex2->nStatus = pindex2->nStatus.withFailed();
-    resp = {AvalancheTest::getRound(p), 0, {AvalancheVote(0, blockHash)}};
+    resp = {AvalancheTest::getRound(p), 0, {Vote(0, blockHash)}};
     AvalancheTest::runEventLoop(p);
     BOOST_CHECK(p.registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), avanodeid);
 
     connman->ClearNodes();
 }
 
 BOOST_AUTO_TEST_CASE(poll_inflight_timeout, *boost::unit_test::timeout(60)) {
     const Config &config = GetConfig();
 
     auto connman = std::make_unique<CConnmanTest>(config, 0x1337, 0x1337);
     auto peerLogic = std::make_unique<PeerLogicValidation>(
         connman.get(), nullptr, *m_node.scheduler, false);
 
     Processor p(connman.get());
 
     std::vector<BlockUpdate> updates;
 
     CBlock block = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHash = block.GetHash();
     const CBlockIndex *pindex;
     {
         LOCK(cs_main);
         pindex = LookupBlockIndex(blockHash);
     }
 
     // Add the block
     BOOST_CHECK(p.addBlockToReconcile(pindex));
 
     // Create a node that supports avalanche.
     auto avanode =
         ConnectNode(config, NODE_AVALANCHE, *peerLogic, connman.get());
     NodeId avanodeid = avanode->GetId();
     BOOST_CHECK(p.addPeer(avanodeid, 100, CPubKey()));
 
     // Expire requests after some time.
     auto queryTimeDuration = std::chrono::milliseconds(10);
     p.setQueryTimeoutDuration(queryTimeDuration);
     for (int i = 0; i < 10; i++) {
-        AvalancheResponse resp = {
-            AvalancheTest::getRound(p), 0, {AvalancheVote(0, blockHash)}};
+        Response resp = {AvalancheTest::getRound(p), 0, {Vote(0, blockHash)}};
 
         auto start = std::chrono::steady_clock::now();
         AvalancheTest::runEventLoop(p);
         // We cannot guarantee that we'll wait for just 1ms, so we have to bail
         // if we aren't within the proper time range.
         std::this_thread::sleep_for(std::chrono::milliseconds(1));
         AvalancheTest::runEventLoop(p);
 
         bool ret = p.registerVotes(avanodeid, next(resp), updates);
         if (std::chrono::steady_clock::now() > start + queryTimeDuration) {
             // We waited for too long, bail. Because we can't know for sure when
             // previous steps ran, ret is not deterministic and we do not check
             // it.
             i--;
             continue;
         }
 
         // We are within time bounds, so the vote should have worked.
         BOOST_CHECK(ret);
 
         // Now try again but wait for expiration.
         AvalancheTest::runEventLoop(p);
         std::this_thread::sleep_for(queryTimeDuration);
         AvalancheTest::runEventLoop(p);
         BOOST_CHECK(!p.registerVotes(avanodeid, next(resp), updates));
     }
 
     connman->ClearNodes();
 }
 
 BOOST_AUTO_TEST_CASE(poll_inflight_count) {
     const Config &config = GetConfig();
 
     auto connman = std::make_unique<CConnmanTest>(config, 0x1337, 0x1337);
     auto peerLogic = std::make_unique<PeerLogicValidation>(
         connman.get(), nullptr, *m_node.scheduler, false);
 
     Processor p(connman.get());
 
     // Create enough nodes so that we run into the inflight request limit.
     PeerManager &pm = AvalancheTest::getPeerManager(p);
     PeerId pid = pm.addPeer(100);
 
     std::array<CNode *, AVALANCHE_MAX_INFLIGHT_POLL + 1> nodes;
     for (auto &n : nodes) {
         n = ConnectNode(config, NODE_AVALANCHE, *peerLogic, connman.get());
         BOOST_CHECK(pm.addNodeToPeer(pid, n->GetId(), CPubKey()));
     }
 
     // Add a block to poll
     CBlock block = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHash = block.GetHash();
     const CBlockIndex *pindex;
     {
         LOCK(cs_main);
         pindex = LookupBlockIndex(blockHash);
     }
     BOOST_CHECK(p.addBlockToReconcile(pindex));
 
     // Ensure there are enough requests in flight.
     std::map<NodeId, uint64_t> node_round_map;
     for (int i = 0; i < AVALANCHE_MAX_INFLIGHT_POLL; i++) {
         NodeId nodeid = AvalancheTest::getSuitableNodeToQuery(p);
         BOOST_CHECK(node_round_map.find(nodeid) == node_round_map.end());
         node_round_map[nodeid] = AvalancheTest::getRound(p);
         auto invs = AvalancheTest::getInvsForNextPoll(p);
         BOOST_CHECK_EQUAL(invs.size(), 1);
         BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
         BOOST_CHECK(invs[0].hash == blockHash);
         AvalancheTest::runEventLoop(p);
     }
 
     // Now that we have enough requests in flight, we shouldn't poll.
     auto suitablenodeid = AvalancheTest::getSuitableNodeToQuery(p);
     BOOST_CHECK(suitablenodeid != NO_NODE);
     auto invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 0);
     AvalancheTest::runEventLoop(p);
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), suitablenodeid);
 
     std::vector<BlockUpdate> updates;
 
     // Send one response, now we can poll again.
     auto it = node_round_map.begin();
-    AvalancheResponse resp = {it->second, 0, {AvalancheVote(0, blockHash)}};
+    Response resp = {it->second, 0, {Vote(0, blockHash)}};
     BOOST_CHECK(p.registerVotes(it->first, resp, updates));
     node_round_map.erase(it);
 
     invs = AvalancheTest::getInvsForNextPoll(p);
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK(invs[0].hash == blockHash);
 
     connman->ClearNodes();
 }
 
 BOOST_AUTO_TEST_CASE(quorum_diversity) {
     const Config &config = GetConfig();
 
     auto connman = std::make_unique<CConnmanTest>(config, 0x1337, 0x1337);
     auto peerLogic = std::make_unique<PeerLogicValidation>(
         connman.get(), nullptr, *m_node.scheduler, false);
 
     Processor p(connman.get());
     std::vector<BlockUpdate> updates;
 
     CBlock block = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHash = block.GetHash();
     const CBlockIndex *pindex;
     {
         LOCK(cs_main);
         pindex = LookupBlockIndex(blockHash);
     }
 
     // Create nodes that support avalanche.
     auto avanodes =
         ConnectNodes(config, p, NODE_AVALANCHE, *peerLogic, connman.get());
 
     // Querying for a random block returns false.
     BOOST_CHECK(!p.isAccepted(pindex));
 
     // Add a new block. Check it is added to the polls.
     BOOST_CHECK(p.addBlockToReconcile(pindex));
 
     // Do one valid round of voting.
     uint64_t round = AvalancheTest::getRound(p);
-    AvalancheResponse resp{round, 0, {AvalancheVote(0, blockHash)}};
+    Response resp{round, 0, {Vote(0, blockHash)}};
 
     // Check that all nodes can vote.
     for (size_t i = 0; i < avanodes.size(); i++) {
         AvalancheTest::runEventLoop(p);
         BOOST_CHECK(p.registerVotes(avanodes[i]->GetId(), next(resp), updates));
     }
 
     // Generate a query for every single node.
     const NodeId firstNodeId = AvalancheTest::getSuitableNodeToQuery(p);
     std::map<NodeId, uint64_t> node_round_map;
     round = AvalancheTest::getRound(p);
     for (size_t i = 0; i < avanodes.size(); i++) {
         NodeId nodeid = AvalancheTest::getSuitableNodeToQuery(p);
         BOOST_CHECK(node_round_map.find(nodeid) == node_round_map.end());
         node_round_map[nodeid] = AvalancheTest::getRound(p);
         AvalancheTest::runEventLoop(p);
     }
 
     // Now only the first node can vote. All the others would be duplicates
     // in the quorum.
     auto confidence = p.getConfidence(pindex);
     BOOST_REQUIRE(confidence > 0);
 
     for (auto &pair : node_round_map) {
         NodeId nodeid = pair.first;
         uint64_t r = pair.second;
 
         if (nodeid == firstNodeId) {
             // Node 0 is the only one which can vote at this stage.
             round = r;
             continue;
         }
 
-        BOOST_CHECK(p.registerVotes(
-            nodeid, {r, 0, {AvalancheVote(0, blockHash)}}, updates));
+        BOOST_CHECK(
+            p.registerVotes(nodeid, {r, 0, {Vote(0, blockHash)}}, updates));
         BOOST_CHECK_EQUAL(p.getConfidence(pindex), confidence);
     }
 
-    BOOST_CHECK(p.registerVotes(
-        firstNodeId, {round, 0, {AvalancheVote(0, blockHash)}}, updates));
+    BOOST_CHECK(p.registerVotes(firstNodeId, {round, 0, {Vote(0, blockHash)}},
+                                updates));
     BOOST_CHECK_EQUAL(p.getConfidence(pindex), confidence + 1);
 
     connman->ClearNodes();
 }
 
 BOOST_AUTO_TEST_CASE(event_loop) {
     const Config &config = GetConfig();
 
     auto connman = std::make_unique<CConnmanTest>(config, 0x1337, 0x1337);
     auto peerLogic = std::make_unique<PeerLogicValidation>(
         connman.get(), nullptr, *m_node.scheduler, false);
 
     Processor p(connman.get());
     CScheduler s;
 
     CBlock block = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHash = block.GetHash();
     const CBlockIndex *pindex;
     {
         LOCK(cs_main);
         pindex = LookupBlockIndex(blockHash);
     }
 
     // Starting the event loop.
     BOOST_CHECK(p.startEventLoop(s));
 
     // There is one task planned in the next hour (our event loop).
     std::chrono::system_clock::time_point start, stop;
     BOOST_CHECK_EQUAL(s.getQueueInfo(start, stop), 1);
 
     // Starting twice doesn't start it twice.
     BOOST_CHECK(!p.startEventLoop(s));
 
     // Start the scheduler thread.
     std::thread schedulerThread(std::bind(&CScheduler::serviceQueue, &s));
 
     // Create a node that supports avalanche.
     auto avanode =
         ConnectNode(config, NODE_AVALANCHE, *peerLogic, connman.get());
     NodeId nodeid = avanode->GetId();
     BOOST_CHECK(p.addPeer(nodeid, 100, CPubKey()));
 
     // There is no query in flight at the moment.
     BOOST_CHECK_EQUAL(AvalancheTest::getSuitableNodeToQuery(p), nodeid);
 
     // Add a new block. Check it is added to the polls.
     uint64_t queryRound = AvalancheTest::getRound(p);
     BOOST_CHECK(p.addBlockToReconcile(pindex));
 
     for (int i = 0; i < 60 * 1000; i++) {
         // Technically, this is a race condition, but this should do just fine
         // as we wait up to 1 minute for an event that should take 10ms.
         UninterruptibleSleep(std::chrono::milliseconds(1));
         if (AvalancheTest::getRound(p) != queryRound) {
             break;
         }
     }
 
     // Check that we effectively got a request and did not time out.
     BOOST_CHECK(AvalancheTest::getRound(p) > queryRound);
 
     // Respond and check the cooldown time is respected.
     uint64_t responseRound = AvalancheTest::getRound(p);
     auto queryTime =
         std::chrono::steady_clock::now() + std::chrono::milliseconds(100);
 
     std::vector<BlockUpdate> updates;
-    p.registerVotes(nodeid, {queryRound, 100, {AvalancheVote(0, blockHash)}},
-                    updates);
+    p.registerVotes(nodeid, {queryRound, 100, {Vote(0, blockHash)}}, updates);
     for (int i = 0; i < 10000; i++) {
         // We make sure that we do not get a request before queryTime.
         UninterruptibleSleep(std::chrono::milliseconds(1));
         if (AvalancheTest::getRound(p) != responseRound) {
             BOOST_CHECK(std::chrono::steady_clock::now() > queryTime);
             break;
         }
     }
 
     // But we eventually get one.
     BOOST_CHECK(AvalancheTest::getRound(p) > responseRound);
 
     // Stop event loop.
     BOOST_CHECK(p.stopEventLoop());
 
     // We don't have any task scheduled anymore.
     BOOST_CHECK_EQUAL(s.getQueueInfo(start, stop), 0);
 
     // Can't stop the event loop twice.
     BOOST_CHECK(!p.stopEventLoop());
 
     // Wait for the scheduler to stop.
     s.stop(true);
     schedulerThread.join();
 
     connman->ClearNodes();
 }
 
 BOOST_AUTO_TEST_CASE(destructor) {
     CScheduler s;
     std::chrono::system_clock::time_point start, stop;
 
     // Start the scheduler thread.
     std::thread schedulerThread(std::bind(&CScheduler::serviceQueue, &s));
 
     {
         Processor p(m_node.connman.get());
         BOOST_CHECK(p.startEventLoop(s));
         BOOST_CHECK_EQUAL(s.getQueueInfo(start, stop), 1);
     }
 
     // Now that avalanche is destroyed, there are no more scheduled tasks.
     BOOST_CHECK_EQUAL(s.getQueueInfo(start, stop), 0);
 
     // Wait for the scheduler to stop.
     s.stop(true);
     schedulerThread.join();
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 71e414598..fd6213f18 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1,5086 +1,5086 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <net_processing.h>
 
 #include <addrman.h>
 #include <avalanche/processor.h>
 #include <banman.h>
 #include <blockencodings.h>
 #include <blockvalidity.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <config.h>
 #include <consensus/validation.h>
 #include <hash.h>
 #include <merkleblock.h>
 #include <netbase.h>
 #include <netmessagemaker.h>
 #include <policy/fees.h>
 #include <policy/policy.h>
 #include <primitives/block.h>
 #include <primitives/transaction.h>
 #include <random.h>
 #include <reverse_iterator.h>
 #include <scheduler.h>
 #include <tinyformat.h>
 #include <txmempool.h>
 #include <util/strencodings.h>
 #include <util/system.h>
 #include <util/validation.h>
 #include <validation.h>
 
 #include <memory>
 
 #if defined(NDEBUG)
 #error "Bitcoin cannot be compiled without assertions."
 #endif
 
 /** Expiration time for orphan transactions in seconds */
 static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60;
 /** Minimum time between orphan transactions expire time checks in seconds */
 static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60;
 /** How long to cache transactions in mapRelay for normal relay */
 static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME{15 * 60};
 /**
  * Headers download timeout expressed in microseconds.
  * Timeout = base + per_header * (expected number of headers)
  */
 // 15 minutes
 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000;
 // 1ms/header
 static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000;
 /**
  * Protect at least this many outbound peers from disconnection due to
  * slow/behind headers chain.
  */
 static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
 /**
  * Timeout for (unprotected) outbound peers to sync to our chainwork, in
  * seconds.
  */
 // 20 minutes
 static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60;
 /** How frequently to check for stale tips, in seconds */
 // 10 minutes
 static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60;
 /**
  * How frequently to check for extra outbound peers and disconnect, in seconds.
  */
 static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45;
 /**
  * Minimum time an outbound-peer-eviction candidate must be connected for, in
  * order to evict, in seconds.
  */
 static constexpr int64_t MINIMUM_CONNECT_TIME = 30;
 /** SHA256("main address relay")[0:8] */
 static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
 /// Age after which a stale block will no longer be served if requested as
 /// protection against fingerprinting. Set to one month, denominated in seconds.
 static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
 /// Age after which a block is considered historical for purposes of rate
 /// limiting block relay. Set to one week, denominated in seconds.
 static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
 /** Maximum number of in-flight transactions from a peer */
 static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100;
 /** Maximum number of announced transactions from a peer */
 static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ;
 /** How many microseconds to delay requesting transactions from inbound peers */
 static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{
     std::chrono::seconds{2}};
 /**
  * How long to wait (in microseconds) before downloading a transaction from an
  * additional peer.
  */
 static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{
     std::chrono::seconds{60}};
 /**
  * Maximum delay (in microseconds) for transaction requests to avoid biasing
  * some peers over others.
  */
 static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{
     std::chrono::seconds{2}};
 /**
  * How long to wait (in microseconds) before expiring an in-flight getdata
  * request to a peer.
  */
 static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL{
     GETDATA_TX_INTERVAL * 10};
 static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY,
               "To preserve security, MAX_GETDATA_RANDOM_DELAY should not "
               "exceed INBOUND_PEER_DELAY");
 /**
  * Limit to avoid sending big packets. Not used in processing incoming GETDATA
  * for compatibility.
  */
 static const unsigned int MAX_GETDATA_SZ = 1000;
 
 /// How many non-standard orphans we consider from a node before ignoring it.
 static constexpr uint32_t MAX_NON_STANDARD_ORPHAN_PER_NODE = 5;
 
 struct COrphanTx {
     // When modifying, adapt the copy of this definition in tests/DoS_tests.
     CTransactionRef tx;
     NodeId fromPeer;
     int64_t nTimeExpire;
     size_t list_pos;
 };
 
 RecursiveMutex g_cs_orphans;
 std::map<TxId, COrphanTx> mapOrphanTransactions GUARDED_BY(g_cs_orphans);
 
 void EraseOrphansFor(NodeId peer);
 
 /**
  * Average delay between local address broadcasts in seconds.
  */
 static constexpr unsigned int AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL =
     24 * 60 * 60;
 /**
  * Average delay between peer address broadcasts in seconds.
  */
 static const unsigned int AVG_ADDRESS_BROADCAST_INTERVAL = 30;
 /**
  * Average delay between trickled inventory transmissions in seconds.
  * Blocks and whitelisted receivers bypass this; outbound peers get half
  * this delay.
  */
 static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5;
 /**
  * Maximum number of inventory items to send per transmission.
  * Limits the impact of low-fee transaction floods.
  */
 static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
     7 * INVENTORY_BROADCAST_INTERVAL;
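 /**
  * Worked example (illustrative): with the values above, this works out to
  * 7 * 5 = 35 inventory items per transmission for each MB of the maximum
  * block size, i.e. an average of about 7 tx/s per MB over the 5 second
  * average trickle interval.
  */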
 /**
  * Average delay between feefilter broadcasts in seconds.
  */
 static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60;
 /**
  * Maximum feefilter broadcast delay after significant change.
  */
 static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60;
 
 // Internal stuff
 namespace {
 /** Number of nodes with fSyncStarted. */
 int nSyncStarted GUARDED_BY(cs_main) = 0;
 
 /**
  * Sources of received blocks, saved to be able to send them reject messages or
  * ban them when processing happens afterwards.
  * Set mapBlockSource[hash].second to false if the node should not be punished
  * if the block is invalid.
  */
 std::map<BlockHash, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main);
 
 /**
  * Filter for transactions that were recently rejected by AcceptToMemoryPool.
  * These are not rerequested until the chain tip changes, at which point the
  * entire filter is reset.
  *
  * Without this filter we'd be re-requesting txs from each of our peers,
  * increasing bandwidth consumption considerably. For instance, with 100 peers,
  * half of which relay a tx we don't accept, that might be a 50x bandwidth
  * increase. A flooding attacker attempting to roll over the filter using
  * minimum-sized, 60-byte transactions might manage to send 1000/sec if we have
  * fast peers, so we pick 120,000 to give our peers a two minute window to send
  * invs to us.
  *
  * Decreasing the false positive rate is fairly cheap, so we pick one in a
  * million to make it highly unlikely for users to have issues with this filter.
  *
  * Memory used: 1.3 MB
  */
 std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
 uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
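 
 /**
  * A minimal sketch (illustrative only, not part of this change) of the
  * sizing and reset behaviour described above; the real logic lives where
  * incoming messages are processed, and the name ResetRecentRejectsSketch is
  * hypothetical.
  */
 static void ResetRecentRejectsSketch(const BlockHash &new_tip)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (!recentRejects) {
         // 120,000 entries with a one-in-a-million false positive rate, as
         // explained in the comment above.
         recentRejects =
             std::make_unique<CRollingBloomFilter>(120000, 0.000001);
     }
     if (hashRecentRejectsChainTip != new_tip) {
         // The chain tip changed: previously rejected txs might now be valid,
         // so reset the whole filter rather than tracking individual entries.
         hashRecentRejectsChainTip = new_tip;
         recentRejects->reset();
     }
 }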
 
 /**
  * Blocks that are in flight, and that are in the queue to be downloaded.
  */
 struct QueuedBlock {
     BlockHash hash;
     //! Optional.
     const CBlockIndex *pindex;
     //! Whether this block has validated headers at the time of request.
     bool fValidatedHeaders;
     //! Optional, used for CMPCTBLOCK downloads
     std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
 };
 std::map<BlockHash, std::pair<NodeId, std::list<QueuedBlock>::iterator>>
     mapBlocksInFlight GUARDED_BY(cs_main);
 
 /** List of nodes which we have set to announce using compact blocks */
 std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
 
 /** Number of preferable block download peers. */
 int nPreferredDownload GUARDED_BY(cs_main) = 0;
 
 /** Number of peers from which we're downloading blocks. */
 int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0;
 
 /** Number of outbound peers with m_chain_sync.m_protect. */
 int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
 
 /** When our tip was last updated. */
 std::atomic<int64_t> g_last_tip_update(0);
 
 /** Relay map. */
 typedef std::map<uint256, CTransactionRef> MapRelay;
 MapRelay mapRelay GUARDED_BY(cs_main);
 /**
  * Expiration-time ordered list of (expire time, relay map entry) pairs,
  * protected by cs_main.
  */
 std::deque<std::pair<int64_t, MapRelay::iterator>>
     vRelayExpiration GUARDED_BY(cs_main);
 
 struct IteratorComparator {
     template <typename I> bool operator()(const I &a, const I &b) const {
         return &(*a) < &(*b);
     }
 };
 std::map<COutPoint,
          std::set<std::map<TxId, COrphanTx>::iterator, IteratorComparator>>
     mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans);
 
 //! For random eviction
 std::vector<std::map<TxId, COrphanTx>::iterator>
     g_orphan_list GUARDED_BY(g_cs_orphans);
 
 static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0;
 static std::vector<std::pair<TxHash, CTransactionRef>>
     vExtraTxnForCompact GUARDED_BY(g_cs_orphans);
 } // namespace
 
 namespace {
 struct CBlockReject {
     uint8_t chRejectCode;
     std::string strRejectReason;
     uint256 hashBlock;
 };
 
 /**
  * Maintain validation-specific state about nodes, protected by cs_main
  * instead of by CNode's own locks. This simplifies asynchronous operation,
  * where processing of incoming data is done after the ProcessMessage call
  * returns and we're no longer holding the node's locks.
  */
 struct CNodeState {
     //! The peer's address
     const CService address;
     //! Whether we have a fully established connection.
     bool fCurrentlyConnected;
     //! Accumulated misbehaviour score for this peer.
     int nMisbehavior;
     //! Whether this peer should be disconnected and banned (unless
     //! whitelisted).
     bool fShouldBan;
     //! String name of this peer (debugging/logging purposes).
     const std::string name;
     //! List of asynchronously-determined block rejections to notify this peer
     //! about.
     std::vector<CBlockReject> rejects;
     //! The best known block we know this peer has announced.
     const CBlockIndex *pindexBestKnownBlock;
     //! The hash of the last unknown block this peer has announced.
     BlockHash hashLastUnknownBlock;
     //! The last full block we both have.
     const CBlockIndex *pindexLastCommonBlock;
     //! The best header we have sent our peer.
     const CBlockIndex *pindexBestHeaderSent;
     //! Length of the current streak of unconnecting headers announcements
     int nUnconnectingHeaders;
     //! Whether we've started headers synchronization with this peer.
     bool fSyncStarted;
     //! When to potentially disconnect peer for stalling headers download
     int64_t nHeadersSyncTimeout;
     //! Since when we're stalling block download progress (in microseconds), or
     //! 0.
     int64_t nStallingSince;
     std::list<QueuedBlock> vBlocksInFlight;
     //! When the first entry in vBlocksInFlight started downloading. Don't care
     //! when vBlocksInFlight is empty.
     int64_t nDownloadingSince;
     int nBlocksInFlight;
     int nBlocksInFlightValidHeaders;
     //! Whether we consider this a preferred download peer.
     bool fPreferredDownload;
     //! Whether this peer wants invs or headers (when possible) for block
     //! announcements.
     bool fPreferHeaders;
     //! Whether this peer wants invs or cmpctblocks (when possible) for block
     //! announcements.
     bool fPreferHeaderAndIDs;
     /**
      * Whether this peer will send us cmpctblocks if we request them.
      * This is not used to gate request logic, as we really only care about
      * fSupportsDesiredCmpctVersion, but is used as a flag to "lock in" the
      * version of compact blocks we send.
      */
     bool fProvidesHeaderAndIDs;
     /**
      * If we've announced NODE_WITNESS to this peer: whether the peer sends
      * witnesses in cmpctblocks/blocktxns, otherwise: whether this peer sends
      * non-witnesses in cmpctblocks/blocktxns.
      */
     bool fSupportsDesiredCmpctVersion;
 
     /**
      * State used to enforce CHAIN_SYNC_TIMEOUT
      * Only in effect for outbound, non-manual, full-relay connections, with
      * m_protect == false
      * Algorithm: if a peer's best known block has less work than our tip, set
      * a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
      *   - If at timeout their best known block now has more work than our tip
      * when the timeout was set, then either reset the timeout or clear it
      * (after comparing against our current tip's work)
      *   - If at timeout their best known block still has less work than our tip
      * did when the timeout was set, then send a getheaders message, and set a
      * shorter timeout, HEADERS_RESPONSE_TIME seconds in future. If their best
      * known block is still behind when that new timeout is reached, disconnect.
      */
     struct ChainSyncTimeoutState {
         //! A timeout used for checking whether our peer has sufficiently
         //! synced.
         int64_t m_timeout;
         //! A header with the work we require on our peer's chain.
         const CBlockIndex *m_work_header;
         //! After timeout is reached, set to true after sending getheaders.
         bool m_sent_getheaders;
         //! Whether this peer is protected from disconnection due to a bad/slow
         //! chain.
         bool m_protect;
     };
 
     ChainSyncTimeoutState m_chain_sync;
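 
     /**
      * Worked timeline (illustrative): at time T we notice the peer's best
      * known block has less work than our tip, so m_timeout is set to
      * T + CHAIN_SYNC_TIMEOUT and m_work_header to our tip. If by the timeout
      * the peer still has not announced a block with at least m_work_header's
      * work, we send a single getheaders and allow HEADERS_RESPONSE_TIME for
      * a reply; if the peer is still behind after that, it is disconnected.
      * If the peer catches up at any point, the timeout state is cleared.
      */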
 
     //! Time of last new block announcement
     int64_t m_last_block_announcement;
 
     /*
      * State associated with transaction download.
      *
      * Tx download algorithm:
      *
      *   When inv comes in, queue up (process_time, txid) inside the peer's
      *   CNodeState (m_tx_process_time) as long as m_tx_announced for the peer
      *   isn't too big (MAX_PEER_TX_ANNOUNCEMENTS).
      *
      *   The process_time for a transaction is set to nNow for outbound peers,
      *   nNow + 2 seconds for inbound peers. This is the time at which we'll
      *   consider trying to request the transaction from the peer in
      *   SendMessages(). The delay for inbound peers is to allow outbound peers
      *   a chance to announce before we request from inbound peers, to prevent
      *   an adversary from using inbound connections to blind us to a
      *   transaction (InvBlock).
      *
      *   When we call SendMessages() for a given peer,
      *   we will loop over the transactions in m_tx_process_time, looking
      *   at the transactions whose process_time <= nNow. We'll request each
      *   such transaction that we don't have already and that hasn't been
      *   requested from another peer recently, up until we hit the
      *   MAX_PEER_TX_IN_FLIGHT limit for the peer. Then we'll update
      *   g_already_asked_for for each requested txid, storing the time of the
      *   GETDATA request. We use g_already_asked_for to coordinate transaction
      *   requests amongst our peers.
      *
      *   For transactions that we still need but we have already recently
      *   requested from some other peer, we'll reinsert (process_time, txid)
      *   back into the peer's m_tx_process_time at the point in the future at
      *   which the most recent GETDATA request would time out (ie
      *   GETDATA_TX_INTERVAL + the request time stored in g_already_asked_for).
      *   We add an additional delay for inbound peers, again to prefer
      *   attempting download from outbound peers first.
      *   We also add an extra small random delay up to 2 seconds
      *   to avoid biasing some peers over others. (e.g., due to fixed ordering
      *   of peer processing in ThreadMessageHandler).
      *
      *   When we receive a transaction from a peer, we remove the txid from the
      *   peer's m_tx_in_flight set and from their recently announced set
      *   (m_tx_announced).  We also clear g_already_asked_for for that entry, so
      *   that if somehow the transaction is not accepted but also not added to
      *   the reject filter, then we will eventually redownload from other
      *   peers.
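      *
      *   (A condensed, illustrative sketch of this request loop follows
      *   RequestTx() below.)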
      */
     struct TxDownloadState {
         /**
          * Track when to attempt download of announced transactions (process
          * time in micros -> txid)
          */
         std::multimap<std::chrono::microseconds, TxId> m_tx_process_time;
 
         //! Store all the transactions a peer has recently announced
         std::set<TxId> m_tx_announced;
 
         //! Store transactions which were requested by us, with timestamp
         std::map<TxId, std::chrono::microseconds> m_tx_in_flight;
 
         //! Periodically check for stuck getdata requests
         std::chrono::microseconds m_check_expiry_timer{0};
     };
 
     TxDownloadState m_tx_download;
 
     struct AvalancheState {
         std::chrono::time_point<std::chrono::steady_clock> last_poll;
     };
 
     AvalancheState m_avalanche_state;
 
     //! Whether this peer is an inbound connection
     bool m_is_inbound;
 
     //! Whether this peer is a manual connection
     bool m_is_manual_connection;
 
     CNodeState(CAddress addrIn, std::string addrNameIn, bool is_inbound,
                bool is_manual)
         : address(addrIn), name(std::move(addrNameIn)),
           m_is_inbound(is_inbound), m_is_manual_connection(is_manual) {
         fCurrentlyConnected = false;
         nMisbehavior = 0;
         fShouldBan = false;
         pindexBestKnownBlock = nullptr;
         hashLastUnknownBlock = BlockHash();
         pindexLastCommonBlock = nullptr;
         pindexBestHeaderSent = nullptr;
         nUnconnectingHeaders = 0;
         fSyncStarted = false;
         nHeadersSyncTimeout = 0;
         nStallingSince = 0;
         nDownloadingSince = 0;
         nBlocksInFlight = 0;
         nBlocksInFlightValidHeaders = 0;
         fPreferredDownload = false;
         fPreferHeaders = false;
         fPreferHeaderAndIDs = false;
         fProvidesHeaderAndIDs = false;
         fSupportsDesiredCmpctVersion = false;
         m_chain_sync = {0, nullptr, false, false};
         m_last_block_announcement = 0;
     }
 };
 
 // Keeps track of the time (in microseconds) when each transaction was last
 // requested.
 limitedmap<TxId, std::chrono::microseconds>
     g_already_asked_for GUARDED_BY(cs_main)(MAX_INV_SZ);
 
 /** Map maintaining per-node state. */
 static std::map<NodeId, CNodeState> mapNodeState GUARDED_BY(cs_main);
 
 static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
     if (it == mapNodeState.end()) {
         return nullptr;
     }
 
     return &it->second;
 }
 
 static void UpdatePreferredDownload(CNode *node, CNodeState *state)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     nPreferredDownload -= state->fPreferredDownload;
 
     // Whether this node should be marked as a preferred download node.
     state->fPreferredDownload =
         (!node->fInbound || node->HasPermission(PF_NOBAN)) && !node->fOneShot &&
         !node->fClient;
 
     nPreferredDownload += state->fPreferredDownload;
 }
 
 static void PushNodeVersion(const Config &config, CNode *pnode,
                             CConnman *connman, int64_t nTime) {
     ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
     uint64_t nonce = pnode->GetLocalNonce();
     int nNodeStartingHeight = pnode->GetMyStartingHeight();
     NodeId nodeid = pnode->GetId();
     CAddress addr = pnode->addr;
 
     CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr)
                             ? addr
                             : CAddress(CService(), addr.nServices));
     CAddress addrMe = CAddress(CService(), nLocalNodeServices);
 
     connman->PushMessage(
         pnode, CNetMsgMaker(INIT_PROTO_VERSION)
                    .Make(NetMsgType::VERSION, PROTOCOL_VERSION,
                          uint64_t(nLocalNodeServices), nTime, addrYou, addrMe,
                          nonce, userAgent(config), nNodeStartingHeight,
                          ::g_relay_txes && pnode->m_tx_relay != nullptr));
 
     if (fLogIPs) {
         LogPrint(BCLog::NET,
                  "send version message: version %d, blocks=%d, us=%s, them=%s, "
                  "peer=%d\n",
                  PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(),
                  addrYou.ToString(), nodeid);
     } else {
         LogPrint(
             BCLog::NET,
             "send version message: version %d, blocks=%d, us=%s, peer=%d\n",
             PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
     }
 }
 
 // Returns a bool indicating whether we requested this block.
 // Also called when a block was /not/ received, e.g. because it timed out or
 // was started with another peer.
 static bool MarkBlockAsReceived(const BlockHash &hash)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     std::map<BlockHash,
              std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
         itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end()) {
         CNodeState *state = State(itInFlight->second.first);
         assert(state != nullptr);
         state->nBlocksInFlightValidHeaders -=
             itInFlight->second.second->fValidatedHeaders;
         if (state->nBlocksInFlightValidHeaders == 0 &&
             itInFlight->second.second->fValidatedHeaders) {
             // Last validated block on the queue was received.
             nPeersWithValidatedDownloads--;
         }
         if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
             // First block on the queue was received, update the start download
             // time for the next one
             state->nDownloadingSince =
                 std::max(state->nDownloadingSince, GetTimeMicros());
         }
         state->vBlocksInFlight.erase(itInFlight->second.second);
         state->nBlocksInFlight--;
         state->nStallingSince = 0;
         mapBlocksInFlight.erase(itInFlight);
         return true;
     }
 
     return false;
 }
 
 // Returns false, still setting pit, if the block was already in flight from
 // the same peer.
 // pit will only be valid as long as the same cs_main lock is being held.
 static bool
 MarkBlockAsInFlight(const Config &config, NodeId nodeid, const BlockHash &hash,
                     const Consensus::Params &consensusParams,
                     const CBlockIndex *pindex = nullptr,
                     std::list<QueuedBlock>::iterator **pit = nullptr)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Short-circuit most stuff in case it is from the same node.
     std::map<BlockHash,
              std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
         itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end() &&
         itInFlight->second.first == nodeid) {
         if (pit) {
             *pit = &itInFlight->second.second;
         }
         return false;
     }
 
     // Make sure it's not listed somewhere already.
     MarkBlockAsReceived(hash);
 
     std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
         state->vBlocksInFlight.end(),
         {hash, pindex, pindex != nullptr,
          std::unique_ptr<PartiallyDownloadedBlock>(
              pit ? new PartiallyDownloadedBlock(config, &g_mempool)
                  : nullptr)});
     state->nBlocksInFlight++;
     state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
     if (state->nBlocksInFlight == 1) {
         // We're starting a block download (batch) from this peer.
         state->nDownloadingSince = GetTimeMicros();
     }
 
     if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
         nPeersWithValidatedDownloads++;
     }
 
     itInFlight = mapBlocksInFlight
                      .insert(std::make_pair(hash, std::make_pair(nodeid, it)))
                      .first;
 
     if (pit) {
         *pit = &itInFlight->second.second;
     }
 
     return true;
 }
 
 /** Check whether the last unknown block a peer advertised is not yet known. */
 static void ProcessBlockAvailability(NodeId nodeid)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     if (!state->hashLastUnknownBlock.IsNull()) {
         const CBlockIndex *pindex =
             LookupBlockIndex(state->hashLastUnknownBlock);
         if (pindex && pindex->nChainWork > 0) {
             if (state->pindexBestKnownBlock == nullptr ||
                 pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                 state->pindexBestKnownBlock = pindex;
             }
             state->hashLastUnknownBlock.SetNull();
         }
     }
 }
 
 /** Update tracking information about which blocks a peer is assumed to have. */
 static void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     ProcessBlockAvailability(nodeid);
 
     const CBlockIndex *pindex = LookupBlockIndex(hash);
     if (pindex && pindex->nChainWork > 0) {
         // An actually better block was announced.
         if (state->pindexBestKnownBlock == nullptr ||
             pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
             state->pindexBestKnownBlock = pindex;
         }
     } else {
         // An unknown block was announced; just assume that the latest one is
         // the best one.
         state->hashLastUnknownBlock = hash;
     }
 }
 
 /**
  * When a peer sends us a valid block, instruct it to announce blocks to us
  * using CMPCTBLOCK if possible by adding its nodeid to the end of
  * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by
  * removing the first element if necessary.
  */
 static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid,
                                                  CConnman *connman)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     CNodeState *nodestate = State(nodeid);
     if (!nodestate) {
         LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
         return;
     }
     if (!nodestate->fProvidesHeaderAndIDs) {
         return;
     }
     for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
          it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
         if (*it == nodeid) {
             lNodesAnnouncingHeaderAndIDs.erase(it);
             lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
             return;
         }
     }
     connman->ForNode(nodeid, [&connman](CNode *pfrom) {
         AssertLockHeld(cs_main);
         uint64_t nCMPCTBLOCKVersion = 1;
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
             // As per BIP152, we only get 3 of our peers to announce
             // blocks using compact encodings.
             connman->ForNode(
                 lNodesAnnouncingHeaderAndIDs.front(),
                 [&connman, nCMPCTBLOCKVersion](CNode *pnodeStop) {
                     AssertLockHeld(cs_main);
                     connman->PushMessage(
                         pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion())
                                        .Make(NetMsgType::SENDCMPCT,
                                              /*fAnnounceUsingCMPCTBLOCK=*/false,
                                              nCMPCTBLOCKVersion));
                     return true;
                 });
             lNodesAnnouncingHeaderAndIDs.pop_front();
         }
         connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion())
                                         .Make(NetMsgType::SENDCMPCT,
                                               /*fAnnounceUsingCMPCTBLOCK=*/true,
                                               nCMPCTBLOCKVersion));
         lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
         return true;
     });
 }
 
 static bool TipMayBeStale(const Consensus::Params &consensusParams)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     if (g_last_tip_update == 0) {
         g_last_tip_update = GetTime();
     }
     return g_last_tip_update <
                GetTime() - consensusParams.nPowTargetSpacing * 3 &&
            mapBlocksInFlight.empty();
 }
 
 static bool CanDirectFetch(const Consensus::Params &consensusParams)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     return ::ChainActive().Tip()->GetBlockTime() >
            GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
 }
 
 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (state->pindexBestKnownBlock &&
         pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
         return true;
     }
     if (state->pindexBestHeaderSent &&
         pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
         return true;
     }
     return false;
 }
 
 /**
  * Update pindexLastCommonBlock and add not-in-flight missing successors to
  * vBlocks, until it has at most count entries.
  */
 static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
                                      std::vector<const CBlockIndex *> &vBlocks,
                                      NodeId &nodeStaller,
                                      const Consensus::Params &consensusParams)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (count == 0) {
         return;
     }
 
     vBlocks.reserve(vBlocks.size() + count);
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Make sure pindexBestKnownBlock is up to date, we'll need it.
     ProcessBlockAvailability(nodeid);
 
     if (state->pindexBestKnownBlock == nullptr ||
         state->pindexBestKnownBlock->nChainWork <
             ::ChainActive().Tip()->nChainWork ||
         state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
         // This peer has nothing interesting.
         return;
     }
 
     if (state->pindexLastCommonBlock == nullptr) {
         // Bootstrap quickly by guessing a parent of our best tip is the forking
         // point. Guessing wrong in either direction is not a problem.
         state->pindexLastCommonBlock = ::ChainActive()[std::min(
             state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())];
     }
 
     // If the peer reorganized, our previous pindexLastCommonBlock may not be an
     // ancestor of its current tip anymore. Go back enough to fix that.
     state->pindexLastCommonBlock = LastCommonAncestor(
         state->pindexLastCommonBlock, state->pindexBestKnownBlock);
     if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
         return;
     }
 
     std::vector<const CBlockIndex *> vToFetch;
     const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
     // Never fetch further than the best block we know the peer has, or more
     // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
     // common with this peer. The +1 is so we can detect stalling, namely if we
     // would be able to download that next block if the window were 1 larger.
     int nWindowEnd =
         state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
     int nMaxHeight =
         std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
     NodeId waitingfor = -1;
     while (pindexWalk->nHeight < nMaxHeight) {
         // Read up to 128 (or more, if more blocks than that are needed)
         // successors of pindexWalk (towards pindexBestKnownBlock) into
         // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
         // expensive as iterating over ~100 CBlockIndex* entries anyway.
         int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                 std::max<int>(count - vBlocks.size(), 128));
         vToFetch.resize(nToFetch);
         pindexWalk = state->pindexBestKnownBlock->GetAncestor(
             pindexWalk->nHeight + nToFetch);
         vToFetch[nToFetch - 1] = pindexWalk;
         for (unsigned int i = nToFetch - 1; i > 0; i--) {
             vToFetch[i - 1] = vToFetch[i]->pprev;
         }
 
         // Iterate over those blocks in vToFetch (in forward direction), adding
         // the ones that are not yet downloaded and not in flight to vBlocks. In
         // the meantime, update pindexLastCommonBlock as long as all ancestors
         // are already downloaded, or if it's already part of our chain (and
         // therefore don't need it even if pruned).
         for (const CBlockIndex *pindex : vToFetch) {
             if (!pindex->IsValid(BlockValidity::TREE)) {
                 // We consider the chain that this peer is on invalid.
                 return;
             }
             if (pindex->nStatus.hasData() || ::ChainActive().Contains(pindex)) {
                 if (pindex->HaveTxsDownloaded()) {
                     state->pindexLastCommonBlock = pindex;
                 }
             } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
                 // The block is not already downloaded, and not yet in flight.
                 if (pindex->nHeight > nWindowEnd) {
                     // We reached the end of the window.
                     if (vBlocks.size() == 0 && waitingfor != nodeid) {
                         // We aren't able to fetch anything, but we would be if
                         // the download window was one larger.
                         nodeStaller = waitingfor;
                     }
                     return;
                 }
                 vBlocks.push_back(pindex);
                 if (vBlocks.size() == count) {
                     return;
                 }
             } else if (waitingfor == -1) {
                 // This is the first already-in-flight block.
                 waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
             }
         }
     }
 }
 
 void EraseTxRequest(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     g_already_asked_for.erase(txid);
 }
 
 std::chrono::microseconds GetTxRequestTime(const TxId &txid)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     auto it = g_already_asked_for.find(txid);
     if (it != g_already_asked_for.end()) {
         return it->second;
     }
     return {};
 }
 
 void UpdateTxRequestTime(const TxId &txid,
                          std::chrono::microseconds request_time)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     auto it = g_already_asked_for.find(txid);
     if (it == g_already_asked_for.end()) {
         g_already_asked_for.insert(std::make_pair(txid, request_time));
     } else {
         g_already_asked_for.update(it, request_time);
     }
 }
 
 std::chrono::microseconds
 CalculateTxGetDataTime(const TxId &txid, std::chrono::microseconds current_time,
                        bool use_inbound_delay)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     std::chrono::microseconds process_time;
     const auto last_request_time = GetTxRequestTime(txid);
     // First time requesting this tx
     if (last_request_time.count() == 0) {
         process_time = current_time;
     } else {
         // Randomize the delay to avoid biasing some peers over others (such as
         // due to fixed ordering of peer processing in ThreadMessageHandler)
         process_time = last_request_time + GETDATA_TX_INTERVAL +
                        GetRandMicros(MAX_GETDATA_RANDOM_DELAY);
     }
 
     // We delay processing announcements from inbound peers
     if (use_inbound_delay) {
         process_time += INBOUND_PEER_TX_DELAY;
     }
 
     return process_time;
 }
 
 void RequestTx(CNodeState *state, const TxId &txid,
                std::chrono::microseconds current_time)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     CNodeState::TxDownloadState &peer_download_state = state->m_tx_download;
     if (peer_download_state.m_tx_announced.size() >=
             MAX_PEER_TX_ANNOUNCEMENTS ||
         peer_download_state.m_tx_process_time.size() >=
             MAX_PEER_TX_ANNOUNCEMENTS ||
         peer_download_state.m_tx_announced.count(txid)) {
         // Too many queued announcements from this peer, or we already have
         // this announcement
         return;
     }
     peer_download_state.m_tx_announced.insert(txid);
 
     // Calculate the time to try requesting this transaction. Use
     // fPreferredDownload as a proxy for outbound peers.
     const auto process_time =
         CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload);
 
     peer_download_state.m_tx_process_time.emplace(process_time, txid);
 }
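 
 /**
  * A condensed sketch (illustrative, not part of this change) of the
  * SendMessages() side of the tx download algorithm described in
  * CNodeState::TxDownloadState: process announcements whose process_time has
  * passed, request the ones not recently requested from anyone, and
  * reschedule the rest. The name SketchRequestDueTxs is hypothetical, and
  * bookkeeping such as clearing m_tx_announced is omitted.
  */
 static void SketchRequestDueTxs(CNodeState *state, std::vector<CInv> &getdata,
                                 std::chrono::microseconds current_time)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     auto &process_time_queue = state->m_tx_download.m_tx_process_time;
     while (!process_time_queue.empty() &&
            process_time_queue.begin()->first <= current_time &&
            state->m_tx_download.m_tx_in_flight.size() <
                MAX_PEER_TX_IN_FLIGHT) {
         const TxId txid = process_time_queue.begin()->second;
         process_time_queue.erase(process_time_queue.begin());
 
         if (GetTxRequestTime(txid) <= current_time - GETDATA_TX_INTERVAL) {
             // Not requested recently from any peer: request it now and
             // record the time so that other peers back off.
             getdata.push_back(CInv(MSG_TX, txid));
             UpdateTxRequestTime(txid, current_time);
             state->m_tx_download.m_tx_in_flight.emplace(txid, current_time);
         } else {
             // Another peer was asked recently: retry once that request
             // would have expired, again preferring outbound peers.
             process_time_queue.emplace(
                 CalculateTxGetDataTime(txid, current_time,
                                        !state->fPreferredDownload),
                 txid);
         }
     }
 }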
 
 } // namespace
 
 // This function is used for testing the stale tip eviction logic, see
 // denialofservice_tests.cpp
 void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) {
     LOCK(cs_main);
     CNodeState *state = State(node);
     if (state) {
         state->m_last_block_announcement = time_in_seconds;
     }
 }
 
 // Returns true for outbound peers, excluding manual connections, feelers, and
 // one-shots.
 static bool IsOutboundDisconnectionCandidate(const CNode *node) {
     return !(node->fInbound || node->m_manual_connection || node->fFeeler ||
              node->fOneShot);
 }
 
 void PeerLogicValidation::InitializeNode(const Config &config, CNode *pnode) {
     CAddress addr = pnode->addr;
     std::string addrName = pnode->GetAddrName();
     NodeId nodeid = pnode->GetId();
     {
         LOCK(cs_main);
         mapNodeState.emplace_hint(
             mapNodeState.end(), std::piecewise_construct,
             std::forward_as_tuple(nodeid),
             std::forward_as_tuple(addr, std::move(addrName), pnode->fInbound,
                                   pnode->m_manual_connection));
     }
     if (!pnode->fInbound) {
         PushNodeVersion(config, pnode, connman, GetTime());
     }
 }
 
 void PeerLogicValidation::FinalizeNode(const Config &config, NodeId nodeid,
                                        bool &fUpdateConnectionTime) {
     fUpdateConnectionTime = false;
     LOCK(cs_main);
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     if (state->fSyncStarted) {
         nSyncStarted--;
     }
 
     if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
         fUpdateConnectionTime = true;
     }
 
     for (const QueuedBlock &entry : state->vBlocksInFlight) {
         mapBlocksInFlight.erase(entry.hash);
     }
     EraseOrphansFor(nodeid);
     nPreferredDownload -= state->fPreferredDownload;
     nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
     assert(nPeersWithValidatedDownloads >= 0);
     g_outbound_peers_with_protect_from_disconnect -=
         state->m_chain_sync.m_protect;
     assert(g_outbound_peers_with_protect_from_disconnect >= 0);
 
     mapNodeState.erase(nodeid);
 
     if (mapNodeState.empty()) {
         // Do a consistency check after the last peer is removed.
         assert(mapBlocksInFlight.empty());
         assert(nPreferredDownload == 0);
         assert(nPeersWithValidatedDownloads == 0);
         assert(g_outbound_peers_with_protect_from_disconnect == 0);
     }
     LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
 }
 
 bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
     LOCK(cs_main);
     CNodeState *state = State(nodeid);
     if (state == nullptr) {
         return false;
     }
     stats.nMisbehavior = state->nMisbehavior;
     stats.nSyncHeight =
         state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
     stats.nCommonHeight = state->pindexLastCommonBlock
                               ? state->pindexLastCommonBlock->nHeight
                               : -1;
     for (const QueuedBlock &queue : state->vBlocksInFlight) {
         if (queue.pindex) {
             stats.vHeightInFlight.push_back(queue.pindex->nHeight);
         }
     }
     return true;
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // mapOrphanTransactions
 //
 
 static void AddToCompactExtraTransactions(const CTransactionRef &tx)
     EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) {
     size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn",
                                         DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
     if (max_extra_txn <= 0) {
         return;
     }
 
     if (!vExtraTxnForCompact.size()) {
         vExtraTxnForCompact.resize(max_extra_txn);
     }
 
     vExtraTxnForCompact[vExtraTxnForCompactIt] =
         std::make_pair(tx->GetHash(), tx);
     vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
 }
 
 bool AddOrphanTx(const CTransactionRef &tx, NodeId peer)
     EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) {
     const TxId &txid = tx->GetId();
     if (mapOrphanTransactions.count(txid)) {
         return false;
     }
 
     // Ignore big transactions, to avoid a send-big-orphans memory exhaustion
     // attack. If a peer has a legitimate large transaction with a missing
     // parent then we assume it will rebroadcast it later, after the parent
     // transaction(s) have been mined or received.
     // 100 orphans, each of which is at most 100,000 bytes, add up to at most
     // 10 megabytes of orphans and somewhat more for the by-prev index (in the
     // worst case):
     unsigned int sz = tx->GetTotalSize();
     if (sz > MAX_STANDARD_TX_SIZE) {
         LogPrint(BCLog::MEMPOOL,
                  "ignoring large orphan tx (size: %u, hash: %s)\n", sz,
                  txid.ToString());
         return false;
     }
 
     auto ret = mapOrphanTransactions.emplace(
         txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME,
                         g_orphan_list.size()});
     assert(ret.second);
     g_orphan_list.push_back(ret.first);
     for (const CTxIn &txin : tx->vin) {
         mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
     }
 
     AddToCompactExtraTransactions(tx);
 
     LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n",
              txid.ToString(), mapOrphanTransactions.size(),
              mapOrphanTransactionsByPrev.size());
     return true;
 }
 
 static int EraseOrphanTx(const TxId id) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) {
     const auto it = mapOrphanTransactions.find(id);
     if (it == mapOrphanTransactions.end()) {
         return 0;
     }
     for (const CTxIn &txin : it->second.tx->vin) {
         const auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
         if (itPrev == mapOrphanTransactionsByPrev.end()) {
             continue;
         }
         itPrev->second.erase(it);
         if (itPrev->second.empty()) {
             mapOrphanTransactionsByPrev.erase(itPrev);
         }
     }
 
     size_t old_pos = it->second.list_pos;
     assert(g_orphan_list[old_pos] == it);
     if (old_pos + 1 != g_orphan_list.size()) {
         // Unless we're deleting the last entry in g_orphan_list, move the last
         // entry to the position we're deleting.
         auto it_last = g_orphan_list.back();
         g_orphan_list[old_pos] = it_last;
         it_last->second.list_pos = old_pos;
     }
     g_orphan_list.pop_back();
 
     mapOrphanTransactions.erase(it);
     return 1;
 }
 
 void EraseOrphansFor(NodeId peer) {
     LOCK(g_cs_orphans);
     int nErased = 0;
     auto iter = mapOrphanTransactions.begin();
     while (iter != mapOrphanTransactions.end()) {
         // Increment to avoid iterator becoming invalid.
         const auto maybeErase = iter++;
         if (maybeErase->second.fromPeer == peer) {
             nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
         }
     }
     if (nErased > 0) {
         LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased,
                  peer);
     }
 }
 
 unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) {
     LOCK(g_cs_orphans);
 
     unsigned int nEvicted = 0;
     static int64_t nNextSweep;
     int64_t nNow = GetTime();
     if (nNextSweep <= nNow) {
         // Sweep out expired orphan pool entries:
         int nErased = 0;
         int64_t nMinExpTime =
             nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
         auto iter = mapOrphanTransactions.begin();
         while (iter != mapOrphanTransactions.end()) {
             const auto maybeErase = iter++;
             if (maybeErase->second.nTimeExpire <= nNow) {
                 nErased += EraseOrphanTx(maybeErase->second.tx->GetId());
             } else {
                 nMinExpTime =
                     std::min(maybeErase->second.nTimeExpire, nMinExpTime);
             }
         }
         // Sweep again 5 minutes after the next entry that expires in order to
         // batch the linear scan.
         nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
         if (nErased > 0) {
             LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n",
                      nErased);
         }
     }
     FastRandomContext rng;
     while (mapOrphanTransactions.size() > nMaxOrphans) {
         // Evict a random orphan:
         size_t randompos = rng.randrange(g_orphan_list.size());
         EraseOrphanTx(g_orphan_list[randompos]->first);
         ++nEvicted;
     }
     return nEvicted;
 }
 
 /**
  * Mark a misbehaving peer to be banned depending upon the value of `-banscore`.
  */
 void Misbehaving(NodeId pnode, int howmuch, const std::string &reason)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (howmuch == 0) {
         return;
     }
 
     CNodeState *state = State(pnode);
     if (state == nullptr) {
         return;
     }
 
     state->nMisbehavior += howmuch;
     int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
     if (state->nMisbehavior >= banscore &&
         state->nMisbehavior - howmuch < banscore) {
         LogPrintf(
             "%s: %s peer=%d (%d -> %d) reason: %s BAN THRESHOLD EXCEEDED\n",
             __func__, state->name, pnode, state->nMisbehavior - howmuch,
             state->nMisbehavior, reason);
         state->fShouldBan = true;
     } else {
         LogPrintf("%s: %s peer=%d (%d -> %d) reason: %s\n", __func__,
                   state->name, pnode, state->nMisbehavior - howmuch,
                   state->nMisbehavior, reason);
     }
 }
 
 // overloaded variant of above to operate on CNode*s
 static void Misbehaving(CNode *node, int howmuch, const std::string &reason)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     Misbehaving(node->GetId(), howmuch, reason);
 }
 
 /**
  * Returns true if the given validation state result may result in a peer
  * banning/disconnecting us. We use this to determine which unaccepted
  * transactions from a whitelisted peer we can safely relay.
  */
 static bool TxRelayMayResultInDisconnect(const CValidationState &state) {
     assert(IsTransactionReason(state.GetReason()));
     return state.GetReason() == ValidationInvalidReason::CONSENSUS;
 }
 
 /**
  * Potentially ban a node based on the contents of a CValidationState object
  *
  * @param[in] via_compact_block: this bool is passed in because net_processing
  * should punish peers differently depending on whether the data was provided in
  * a compact block message or not. If the compact block had a valid header, but
  * contained invalid txs, the peer should not be punished. See BIP 152.
  *
  * @return Returns true if the peer was punished (probably disconnected)
  *
  * Changes here may need to be reflected in TxRelayMayResultInDisconnect().
  */
 static bool MaybePunishNode(NodeId nodeid, const CValidationState &state,
                             bool via_compact_block,
                             const std::string &message = "") {
     switch (state.GetReason()) {
         case ValidationInvalidReason::NONE:
             break;
         // The node is providing invalid data:
         case ValidationInvalidReason::CONSENSUS:
         case ValidationInvalidReason::BLOCK_MUTATED:
             if (!via_compact_block) {
                 LOCK(cs_main);
                 Misbehaving(nodeid, 100, message);
                 return true;
             }
             break;
         case ValidationInvalidReason::CACHED_INVALID: {
             LOCK(cs_main);
             CNodeState *node_state = State(nodeid);
             if (node_state == nullptr) {
                 break;
             }
 
             // Ban outbound (but not inbound) peers if on an invalid chain.
             // Exempt HB compact block peers and manual connections.
             if (!via_compact_block && !node_state->m_is_inbound &&
                 !node_state->m_is_manual_connection) {
                 Misbehaving(nodeid, 100, message);
                 return true;
             }
             break;
         }
         case ValidationInvalidReason::BLOCK_INVALID_HEADER:
         case ValidationInvalidReason::BLOCK_CHECKPOINT:
         case ValidationInvalidReason::BLOCK_INVALID_PREV: {
             LOCK(cs_main);
             Misbehaving(nodeid, 100, message);
         }
             return true;
         case ValidationInvalidReason::BLOCK_FINALIZATION: {
              // TODO: Using the state object to report this is probably not
              // the best idea. This is effectively unreachable unless there is
              // a bug somewhere.
             LOCK(cs_main);
             Misbehaving(nodeid, 20, message);
         }
             return true;
         // Conflicting (but not necessarily invalid) data or different policy:
         case ValidationInvalidReason::BLOCK_MISSING_PREV: {
             // TODO: Handle this much more gracefully (10 DoS points is super
             // arbitrary)
             LOCK(cs_main);
             Misbehaving(nodeid, 10, message);
         }
             return true;
         case ValidationInvalidReason::RECENT_CONSENSUS_CHANGE:
         case ValidationInvalidReason::BLOCK_TIME_FUTURE:
         case ValidationInvalidReason::TX_NOT_STANDARD:
         case ValidationInvalidReason::TX_MISSING_INPUTS:
         case ValidationInvalidReason::TX_PREMATURE_SPEND:
         case ValidationInvalidReason::TX_CONFLICT:
         case ValidationInvalidReason::TX_MEMPOOL_POLICY:
             break;
     }
     if (message != "") {
         LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
     }
     return false;
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // blockchain -> download logic notification
 //
 
 // To prevent fingerprinting attacks, only send blocks/headers outside of the
 // active chain if they are no more than a month older (both in time, and in
 // best equivalent proof of work) than the best header chain we know about and
  // we fully validated them at some point.
 static bool BlockRequestAllowed(const CBlockIndex *pindex,
                                 const Consensus::Params &consensusParams)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     if (::ChainActive().Contains(pindex)) {
         return true;
     }
     return pindex->IsValid(BlockValidity::SCRIPTS) &&
            (pindexBestHeader != nullptr) &&
            (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() <
             STALE_RELAY_AGE_LIMIT) &&
            (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex,
                                         *pindexBestHeader, consensusParams) <
             STALE_RELAY_AGE_LIMIT);
 }
 
 PeerLogicValidation::PeerLogicValidation(CConnman *connmanIn, BanMan *banman,
                                          CScheduler &scheduler,
                                          bool enable_bip61)
     : connman(connmanIn), m_banman(banman), m_stale_tip_check_time(0),
       m_enable_bip61(enable_bip61) {
     // Initialize global variables that cannot be constructed at startup.
     recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
 
     const Consensus::Params &consensusParams = Params().GetConsensus();
     // Stale tip checking and peer eviction are on two different timers, but we
     // don't want them to get out of sync due to drift in the scheduler, so we
     // combine them in one function and schedule at the quicker (peer-eviction)
     // timer.
     static_assert(
         EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL,
         "peer eviction timer should be less than stale tip check timer");
     scheduler.scheduleEvery(
         [this, &consensusParams]() {
             this->CheckForStaleTipAndEvictPeers(consensusParams);
             return true;
         },
         std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
 }
 
 /**
  * Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected
  * block. Also save the time of the last tip update.
  */
 void PeerLogicValidation::BlockConnected(
     const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex,
     const std::vector<CTransactionRef> &vtxConflicted) {
     LOCK(g_cs_orphans);
 
     std::vector<TxId> vOrphanErase;
 
     for (const CTransactionRef &ptx : pblock->vtx) {
         const CTransaction &tx = *ptx;
 
         // Which orphan pool entries must we evict?
         for (const auto &txin : tx.vin) {
             auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
             if (itByPrev == mapOrphanTransactionsByPrev.end()) {
                 continue;
             }
 
             for (auto mi = itByPrev->second.begin();
                  mi != itByPrev->second.end(); ++mi) {
                 const CTransaction &orphanTx = *(*mi)->second.tx;
                 const TxId &orphanId = orphanTx.GetId();
                 vOrphanErase.push_back(orphanId);
             }
         }
     }
 
     // Erase orphan transactions included or precluded by this block
     if (vOrphanErase.size()) {
         int nErased = 0;
         for (const auto &orphanId : vOrphanErase) {
             nErased += EraseOrphanTx(orphanId);
         }
         LogPrint(BCLog::MEMPOOL,
                  "Erased %d orphan tx included or conflicted by block\n",
                  nErased);
     }
 
     g_last_tip_update = GetTime();
 }
 
 // All of the following cache a recent block, and are protected by
 // cs_most_recent_block
 static RecursiveMutex cs_most_recent_block;
 static std::shared_ptr<const CBlock>
     most_recent_block GUARDED_BY(cs_most_recent_block);
 static std::shared_ptr<const CBlockHeaderAndShortTxIDs>
     most_recent_compact_block GUARDED_BY(cs_most_recent_block);
 static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block);
 
 /**
  * Maintain state about the best-seen block and fast-announce a compact block
  * to compatible peers.
  */
 void PeerLogicValidation::NewPoWValidBlock(
     const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
         std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
     const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
 
     LOCK(cs_main);
 
     static int nHighestFastAnnounce = 0;
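      // Only fast-announce blocks that extend our best-seen height, so each
      // height goes through this path at most once.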
     if (pindex->nHeight <= nHighestFastAnnounce) {
         return;
     }
     nHighestFastAnnounce = pindex->nHeight;
 
     uint256 hashBlock(pblock->GetHash());
 
     {
         LOCK(cs_most_recent_block);
         most_recent_block_hash = hashBlock;
         most_recent_block = pblock;
         most_recent_compact_block = pcmpctblock;
     }
 
     connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker,
                           &hashBlock](CNode *pnode) {
         AssertLockHeld(cs_main);
 
         // TODO: Avoid the repeated-serialization here
         if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) {
             return;
         }
         ProcessBlockAvailability(pnode->GetId());
         CNodeState &state = *State(pnode->GetId());
          // If the peer already has the previous block (or we announced it to
          // them), but we don't think they have this one, go ahead and
          // announce it.
         if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) &&
             PeerHasHeader(&state, pindex->pprev)) {
             LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n",
                      "PeerLogicValidation::NewPoWValidBlock",
                      hashBlock.ToString(), pnode->GetId());
             connman->PushMessage(
                 pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
             state.pindexBestHeaderSent = pindex;
         }
     });
 }
 
 /**
  * Update our best height and announce any block hashes which weren't previously
  * in ::ChainActive() to our peers.
  */
 void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew,
                                           const CBlockIndex *pindexFork,
                                           bool fInitialDownload) {
     const int nNewHeight = pindexNew->nHeight;
     connman->SetBestHeight(nNewHeight);
 
     SetServiceFlagsIBDCache(!fInitialDownload);
     if (!fInitialDownload) {
         // Find the hashes of all blocks that weren't previously in the best
         // chain.
         std::vector<BlockHash> vHashes;
         const CBlockIndex *pindexToAnnounce = pindexNew;
         while (pindexToAnnounce != pindexFork) {
             vHashes.push_back(pindexToAnnounce->GetBlockHash());
             pindexToAnnounce = pindexToAnnounce->pprev;
             if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
                 // Limit announcements in case of a huge reorganization. Rely on
                 // the peer's synchronization mechanism in that case.
                 break;
             }
         }
         // Relay inventory, but don't relay old inventory during initial block
         // download.
         connman->ForEachNode([nNewHeight, &vHashes](CNode *pnode) {
             if (nNewHeight > (pnode->nStartingHeight != -1
                                   ? pnode->nStartingHeight - 2000
                                   : 0)) {
                 for (const BlockHash &hash : reverse_iterate(vHashes)) {
                     pnode->PushBlockHash(hash);
                 }
             }
         });
         connman->WakeMessageHandler();
     }
 }
 
 /**
  * Handle invalid block rejection and consequent peer banning, maintain which
  * peers announce compact blocks.
  */
 void PeerLogicValidation::BlockChecked(const CBlock &block,
                                        const CValidationState &state) {
     LOCK(cs_main);
 
     const BlockHash hash = block.GetHash();
     std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
         mapBlockSource.find(hash);
 
     if (state.IsInvalid()) {
         // Don't send reject message with code 0 or an internal reject code.
         if (it != mapBlockSource.end() && State(it->second.first) &&
             state.GetRejectCode() > 0 &&
             state.GetRejectCode() < REJECT_INTERNAL) {
             CBlockReject reject = {
                 uint8_t(state.GetRejectCode()),
                 state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH),
                 hash};
             State(it->second.first)->rejects.push_back(reject);
             MaybePunishNode(/*nodeid=*/it->second.first, state,
                             /*via_compact_block=*/!it->second.second);
         }
     }
     // Check that:
     // 1. The block is valid
     // 2. We're not in initial block download
     // 3. This is currently the best block we're aware of. We haven't updated
     //    the tip yet so we have no way to check this directly here. Instead we
     //    just check that there are currently no other blocks in flight.
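      //    (count(hash) == size() holds only when this block is the sole one
      //    in flight, or when nothing is in flight at all.)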
     else if (state.IsValid() &&
              !::ChainstateActive().IsInitialBlockDownload() &&
              mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
         if (it != mapBlockSource.end()) {
             MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman);
         }
     }
 
     if (it != mapBlockSource.end()) {
         mapBlockSource.erase(it);
     }
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // Messages
 //
 
 static bool AlreadyHave(const CInv &inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     switch (inv.type) {
         case MSG_TX: {
             assert(recentRejects);
             if (::ChainActive().Tip()->GetBlockHash() !=
                 hashRecentRejectsChainTip) {
                  // If the chain tip has changed, previously rejected
                  // transactions might now be valid, e.g. due to a nLockTime'd
                  // tx becoming valid, or a double-spend. Reset the rejects
                  // filter and give those txs a second chance.
                 hashRecentRejectsChainTip =
                     ::ChainActive().Tip()->GetBlockHash();
                 recentRejects->reset();
             }
 
             {
                 LOCK(g_cs_orphans);
                 if (mapOrphanTransactions.count(TxId{inv.hash})) {
                     return true;
                 }
             }
 
             // Use pcoinsTip->HaveCoinInCache as a quick approximation to
             // exclude requesting or processing some txs which have already been
             // included in a block. As this is best effort, we only check for
              // outputs 0 and 1. This works well enough in practice and we get
              // diminishing returns from 2 onward.
             const TxId txid(inv.hash);
             return recentRejects->contains(inv.hash) ||
                    g_mempool.exists(txid) ||
                    pcoinsTip->HaveCoinInCache(COutPoint(txid, 0)) ||
                    pcoinsTip->HaveCoinInCache(COutPoint(txid, 1));
         }
         case MSG_BLOCK:
             return LookupBlockIndex(BlockHash(inv.hash)) != nullptr;
     }
     // Don't know what it is, just say we already got one
     return true;
 }
 
 void RelayTransaction(const TxId &txid, const CConnman &connman) {
     CInv inv(MSG_TX, txid);
     connman.ForEachNode([&inv](CNode *pnode) { pnode->PushInventory(inv); });
 }
 
 static void RelayAddress(const CAddress &addr, bool fReachable,
                          CConnman *connman) {
     // Limited relaying of addresses outside our network(s)
     unsigned int nRelayNodes = fReachable ? 2 : 1;
 
     // Relay to a limited number of other nodes.
     // Use deterministic randomness to send to the same nodes for 24 hours at a
     // time so the addrKnowns of the chosen nodes prevent repeats.
     uint64_t hashAddr = addr.GetHash();
     const CSipHasher hasher =
         connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
             .Write(hashAddr << 32)
             .Write((GetTime() + hashAddr) / (24 * 60 * 60));
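      // The second Write() advances once per 24 hours, and the hashAddr
      // offset staggers the rollover boundary per address so relay targets
      // for different addresses do not all rotate at the same instant.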
     FastRandomContext insecure_rand;
 
     std::array<std::pair<uint64_t, CNode *>, 2> best{
         {{0, nullptr}, {0, nullptr}}};
     assert(nRelayNodes <= best.size());
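      // sortfunc keeps the nRelayNodes peers with the highest hashKey via a
      // small insertion sort; pushfunc then relays the address to the winners.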
 
     auto sortfunc = [&best, &hasher, nRelayNodes](CNode *pnode) {
         if (pnode->nVersion >= CADDR_TIME_VERSION && pnode->IsAddrRelayPeer()) {
             uint64_t hashKey =
                 CSipHasher(hasher).Write(pnode->GetId()).Finalize();
             for (unsigned int i = 0; i < nRelayNodes; i++) {
                 if (hashKey > best[i].first) {
                     std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
                               best.begin() + i + 1);
                     best[i] = std::make_pair(hashKey, pnode);
                     break;
                 }
             }
         }
     };
 
     auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
         for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
             best[i].second->PushAddress(addr, insecure_rand);
         }
     };
 
     connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
 }
 
 static void ProcessGetBlockData(const Config &config, CNode *pfrom,
                                 const CInv &inv, CConnman *connman,
                                 const std::atomic<bool> &interruptMsgProc) {
     const Consensus::Params &consensusParams =
         config.GetChainParams().GetConsensus();
 
     const BlockHash hash(inv.hash);
 
     bool send = false;
     std::shared_ptr<const CBlock> a_recent_block;
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
     {
         LOCK(cs_most_recent_block);
         a_recent_block = most_recent_block;
         a_recent_compact_block = most_recent_compact_block;
     }
 
     bool need_activate_chain = false;
     {
         LOCK(cs_main);
         const CBlockIndex *pindex = LookupBlockIndex(hash);
         if (pindex) {
             if (pindex->HaveTxsDownloaded() &&
                 !pindex->IsValid(BlockValidity::SCRIPTS) &&
                 pindex->IsValid(BlockValidity::TREE)) {
                 // If we have the block and all of its parents, but have not yet
                 // validated it, we might be in the middle of connecting it (ie
                 // in the unlock of cs_main before ActivateBestChain but after
                 // AcceptBlock). In this case, we need to run ActivateBestChain
                 // prior to checking the relay conditions below.
                 need_activate_chain = true;
             }
         }
     } // release cs_main before calling ActivateBestChain
     if (need_activate_chain) {
         CValidationState state;
         if (!ActivateBestChain(config, state, a_recent_block)) {
             LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                      FormatStateMessage(state));
         }
     }
 
     LOCK(cs_main);
     const CBlockIndex *pindex = LookupBlockIndex(hash);
     if (pindex) {
         send = BlockRequestAllowed(pindex, consensusParams);
         if (!send) {
             LogPrint(BCLog::NET,
                      "%s: ignoring request from peer=%i for old "
                      "block that isn't in the main chain\n",
                      __func__, pfrom->GetId());
         }
     }
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
     // Disconnect node in case we have reached the outbound limit for serving
     // historical blocks.
     // Never disconnect whitelisted nodes.
     if (send && connman->OutboundTargetReached(true) &&
         (((pindexBestHeader != nullptr) &&
           (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() >
            HISTORICAL_BLOCK_AGE)) ||
          inv.type == MSG_FILTERED_BLOCK) &&
         !pfrom->HasPermission(PF_NOBAN)) {
         LogPrint(BCLog::NET,
                  "historical block serving limit reached, disconnect peer=%d\n",
                  pfrom->GetId());
 
         // disconnect node
         pfrom->fDisconnect = true;
         send = false;
     }
     // Avoid leaking prune-height by never sending blocks below the
     // NODE_NETWORK_LIMITED threshold.
      // Add a two-block buffer to allow for possible races.
     if (send && !pfrom->HasPermission(PF_NOBAN) &&
         ((((pfrom->GetLocalServices() & NODE_NETWORK_LIMITED) ==
            NODE_NETWORK_LIMITED) &&
           ((pfrom->GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) &&
           (::ChainActive().Tip()->nHeight - pindex->nHeight >
            (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
         LogPrint(BCLog::NET,
                  "Ignore block request below NODE_NETWORK_LIMITED "
                  "threshold from peer=%d\n",
                  pfrom->GetId());
 
         // disconnect node and prevent it from stalling (would otherwise wait
         // for the missing block)
         pfrom->fDisconnect = true;
         send = false;
     }
     // Pruned nodes may have deleted the block, so check whether it's available
     // before trying to send.
     if (send && pindex->nStatus.hasData()) {
         std::shared_ptr<const CBlock> pblock;
         if (a_recent_block &&
             a_recent_block->GetHash() == pindex->GetBlockHash()) {
             pblock = a_recent_block;
         } else {
             // Send block from disk
             std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
             if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams)) {
                 assert(!"cannot load block from disk");
             }
             pblock = pblockRead;
         }
         if (inv.type == MSG_BLOCK) {
             connman->PushMessage(pfrom,
                                  msgMaker.Make(NetMsgType::BLOCK, *pblock));
         } else if (inv.type == MSG_FILTERED_BLOCK) {
             bool sendMerkleBlock = false;
             CMerkleBlock merkleBlock;
             if (pfrom->m_tx_relay != nullptr) {
                 LOCK(pfrom->m_tx_relay->cs_filter);
                 if (pfrom->m_tx_relay->pfilter) {
                     sendMerkleBlock = true;
                     merkleBlock =
                         CMerkleBlock(*pblock, *pfrom->m_tx_relay->pfilter);
                 }
             }
             if (sendMerkleBlock) {
                 connman->PushMessage(
                     pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
                 // CMerkleBlock just contains hashes, so also push any
                 // transactions in the block the client did not see. This avoids
                 // hurting performance by pointlessly requiring a round-trip.
                 // Note that there is currently no way for a node to request any
                 // single transactions we didn't send here - they must either
                  // disconnect and retry or request the full block. Thus, the
                  // protocol spec allows us to provide duplicate txn here;
                  // however, we MUST always provide at least what the remote
                  // peer needs.
                 typedef std::pair<size_t, uint256> PairType;
                 for (PairType &pair : merkleBlock.vMatchedTxn) {
                     connman->PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::TX,
                                              *pblock->vtx[pair.first]));
                 }
             }
             // else
             // no response
         } else if (inv.type == MSG_CMPCT_BLOCK) {
             // If a peer is asking for old blocks, we're almost guaranteed they
             // won't have a useful mempool to match against a compact block, and
             // we don't feel like constructing the object for them, so instead
             // we respond with the full, non-compact block.
             int nSendFlags = 0;
             if (CanDirectFetch(consensusParams) &&
                 pindex->nHeight >=
                     ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) {
                 CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
                 connman->PushMessage(
                     pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                          cmpctblock));
             } else {
                 connman->PushMessage(
                     pfrom,
                     msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
             }
         }
 
         // Trigger the peer node to send a getblocks request for the next batch
         // of inventory.
         if (hash == pfrom->hashContinue) {
             // Bypass PushInventory, this must send even if redundant, and we
             // want it right after the last block so they don't wait for other
             // stuff first.
             std::vector<CInv> vInv;
             vInv.push_back(
                 CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash()));
             connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
             pfrom->hashContinue = BlockHash();
         }
     }
 }
 
 static void ProcessGetData(const Config &config, CNode *pfrom,
                            CConnman *connman,
                            const std::atomic<bool> &interruptMsgProc)
     LOCKS_EXCLUDED(cs_main) {
     AssertLockNotHeld(cs_main);
 
     std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
     std::vector<CInv> vNotFound;
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
 
     // Note that if we receive a getdata for a MSG_TX or MSG_WITNESS_TX from a
     // block-relay-only outbound peer, we will stop processing further getdata
     // messages from this peer (likely resulting in our peer eventually
     // disconnecting us).
     if (pfrom->m_tx_relay != nullptr) {
         // mempool entries added before this time have likely expired from
         // mapRelay
         const std::chrono::seconds longlived_mempool_time =
             GetTime<std::chrono::seconds>() - RELAY_TX_CACHE_TIME;
         const std::chrono::seconds mempool_req =
             pfrom->m_tx_relay->m_last_mempool_req.load();
 
         LOCK(cs_main);
 
         while (it != pfrom->vRecvGetData.end() && it->type == MSG_TX) {
             if (interruptMsgProc) {
                 return;
             }
             // Don't bother if send buffer is too full to respond anyway.
             if (pfrom->fPauseSend) {
                 break;
             }
 
             const CInv &inv = *it;
             it++;
 
             // Send stream from relay memory
             bool push = false;
             auto mi = mapRelay.find(inv.hash);
             int nSendFlags = 0;
             if (mi != mapRelay.end()) {
                 connman->PushMessage(
                     pfrom,
                     msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
                 push = true;
             } else {
                 auto txinfo = g_mempool.info(TxId(inv.hash));
                 // To protect privacy, do not answer getdata using the mempool
                 // when that TX couldn't have been INVed in reply to a MEMPOOL
                 // request, or when it's too recent to have expired from
                 // mapRelay.
                 if (txinfo.tx &&
                     ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
                      (txinfo.m_time <= longlived_mempool_time))) {
                     connman->PushMessage(
                         pfrom,
                         msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
                     push = true;
                 }
             }
             if (!push) {
                 vNotFound.push_back(inv);
             }
         }
     } // release cs_main
 
     if (it != pfrom->vRecvGetData.end() && !pfrom->fPauseSend) {
         const CInv &inv = *it;
         if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK ||
             inv.type == MSG_CMPCT_BLOCK) {
             it++;
             ProcessGetBlockData(config, pfrom, inv, connman, interruptMsgProc);
         }
     }
 
     // Unknown types in the GetData stay in vRecvGetData and block any future
     // message from this peer, see vRecvGetData check in ProcessMessages().
     // Depending on future p2p changes, we might either drop unknown getdata on
     // the floor or disconnect the peer.
 
     pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
 
     if (!vNotFound.empty()) {
         // Let the peer know that we didn't find what it asked for, so it
         // doesn't have to wait around forever. SPV clients care about this
         // message: it's needed when they are recursively walking the
         // dependencies of relevant unconfirmed transactions. SPV clients want
         // to do that because they want to know about (and store and rebroadcast
         // and risk analyze) the dependencies of transactions relevant to them,
         // without having to download the entire memory pool. Also, other nodes
         // can use these messages to automatically request a transaction from
          // some other peer that announced it, and stop waiting for us to
         // respond. In normal operation, we often send NOTFOUND messages for
         // parents of transactions that we relay; if a peer is missing a parent,
         // they may assume we have them and request the parents from us.
         connman->PushMessage(pfrom,
                              msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
     }
 }
 
 inline static void SendBlockTransactions(const CBlock &block,
                                          const BlockTransactionsRequest &req,
                                          CNode *pfrom, CConnman *connman) {
     BlockTransactions resp(req);
     for (size_t i = 0; i < req.indices.size(); i++) {
         if (req.indices[i] >= block.vtx.size()) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "out-of-bound-tx-index");
             LogPrintf(
                 "Peer %d sent us a getblocktxn with out-of-bounds tx indices\n",
                 pfrom->GetId());
             return;
         }
         resp.txn[i] = block.vtx[req.indices[i]];
     }
     LOCK(cs_main);
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
     int nSendFlags = 0;
     connman->PushMessage(pfrom,
                          msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
 }
 
 static bool ProcessHeadersMessage(const Config &config, CNode *pfrom,
                                   CConnman *connman,
                                   const std::vector<CBlockHeader> &headers,
                                   bool via_compact_block) {
     const CChainParams &chainparams = config.GetChainParams();
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
     size_t nCount = headers.size();
 
     if (nCount == 0) {
          // Nothing interesting. Stop asking this peer for more headers.
         return true;
     }
 
     bool received_new_header = false;
     const CBlockIndex *pindexLast = nullptr;
     {
         LOCK(cs_main);
         CNodeState *nodestate = State(pfrom->GetId());
 
         // If this looks like it could be a block announcement (nCount <
         // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
         // don't connect:
         // - Send a getheaders message in response to try to connect the chain.
         // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
         // don't connect before giving DoS points
         // - Once a headers message is received that is valid and does connect,
         // nUnconnectingHeaders gets reset back to 0.
         if (!LookupBlockIndex(headers[0].hashPrevBlock) &&
             nCount < MAX_BLOCKS_TO_ANNOUNCE) {
             nodestate->nUnconnectingHeaders++;
             connman->PushMessage(
                 pfrom,
                 msgMaker.Make(NetMsgType::GETHEADERS,
                               ::ChainActive().GetLocator(pindexBestHeader),
                               uint256()));
             LogPrint(
                 BCLog::NET,
                 "received header %s: missing prev block %s, sending getheaders "
                 "(%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
                 headers[0].GetHash().ToString(),
                 headers[0].hashPrevBlock.ToString(), pindexBestHeader->nHeight,
                 pfrom->GetId(), nodestate->nUnconnectingHeaders);
             // Set hashLastUnknownBlock for this peer, so that if we eventually
             // get the headers - even from a different peer - we can use this
             // peer to download.
             UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
 
             if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS ==
                 0) {
                 // The peer is sending us many headers we can't connect.
                 Misbehaving(pfrom, 20, "too-many-unconnected-headers");
             }
             return true;
         }
 
         BlockHash hashLastBlock;
         for (const CBlockHeader &header : headers) {
             if (!hashLastBlock.IsNull() &&
                 header.hashPrevBlock != hashLastBlock) {
                 Misbehaving(pfrom, 20, "disconnected-header");
                 return error("non-continuous headers sequence");
             }
             hashLastBlock = header.GetHash();
         }
 
         // If we don't have the last header, then they'll have given us
         // something new (if these headers are valid).
         if (!LookupBlockIndex(hashLastBlock)) {
             received_new_header = true;
         }
     }
 
     CValidationState state;
     CBlockHeader first_invalid_header;
     if (!ProcessNewBlockHeaders(config, headers, state, &pindexLast,
                                 &first_invalid_header)) {
         if (state.IsInvalid()) {
             MaybePunishNode(pfrom->GetId(), state, via_compact_block,
                             "invalid header received");
             return false;
         }
     }
 
     {
         LOCK(cs_main);
         CNodeState *nodestate = State(pfrom->GetId());
         if (nodestate->nUnconnectingHeaders > 0) {
             LogPrint(BCLog::NET,
                      "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n",
                      pfrom->GetId(), nodestate->nUnconnectingHeaders);
         }
         nodestate->nUnconnectingHeaders = 0;
 
         assert(pindexLast);
         UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
 
         // From here, pindexBestKnownBlock should be guaranteed to be non-null,
         // because it is set in UpdateBlockAvailability. Some nullptr checks are
         // still present, however, as belt-and-suspenders.
 
         if (received_new_header &&
             pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) {
             nodestate->m_last_block_announcement = GetTime();
         }
 
         if (nCount == MAX_HEADERS_RESULTS) {
             // Headers message had its maximum size; the peer may have more
             // headers.
             // TODO: optimize: if pindexLast is an ancestor of
             // ::ChainActive().Tip or pindexBestHeader, continue from there
             // instead.
             LogPrint(
                 BCLog::NET,
                 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
                 pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
             connman->PushMessage(
                 pfrom, msgMaker.Make(NetMsgType::GETHEADERS,
                                      ::ChainActive().GetLocator(pindexLast),
                                      uint256()));
         }
 
         bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
         // If this set of headers is valid and ends in a block with at least as
         // much work as our tip, download as much as possible.
         if (fCanDirectFetch && pindexLast->IsValid(BlockValidity::TREE) &&
             ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) {
             std::vector<const CBlockIndex *> vToFetch;
             const CBlockIndex *pindexWalk = pindexLast;
             // Calculate all the blocks we'd need to switch to pindexLast, up to
             // a limit.
             while (pindexWalk && !::ChainActive().Contains(pindexWalk) &&
                    vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                 if (!pindexWalk->nStatus.hasData() &&
                     !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) {
                     // We don't have this block, and it's not yet in flight.
                     vToFetch.push_back(pindexWalk);
                 }
                 pindexWalk = pindexWalk->pprev;
             }
             // If pindexWalk still isn't on our main chain, we're looking at a
             // very large reorg at a time we think we're close to caught up to
             // the main chain -- this shouldn't really happen. Bail out on the
             // direct fetch and rely on parallel download instead.
             if (!::ChainActive().Contains(pindexWalk)) {
                 LogPrint(
                     BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                     pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
             } else {
                 std::vector<CInv> vGetData;
                 // Download as much as possible, from earliest to latest.
                 for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
                     if (nodestate->nBlocksInFlight >=
                         MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                         // Can't download any more from this peer
                         break;
                     }
                     vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                     MarkBlockAsInFlight(config, pfrom->GetId(),
                                         pindex->GetBlockHash(),
                                         chainparams.GetConsensus(), pindex);
                      LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                              pindex->GetBlockHash().ToString(), pfrom->GetId());
                 }
                 if (vGetData.size() > 1) {
                     LogPrint(BCLog::NET,
                              "Downloading blocks toward %s (%d) via headers "
                              "direct fetch\n",
                              pindexLast->GetBlockHash().ToString(),
                              pindexLast->nHeight);
                 }
                 if (vGetData.size() > 0) {
                     if (nodestate->fSupportsDesiredCmpctVersion &&
                         vGetData.size() == 1 && mapBlocksInFlight.size() == 1 &&
                         pindexLast->pprev->IsValid(BlockValidity::CHAIN)) {
                         // In any case, we want to download using a compact
                         // block, not a regular one.
                         vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                     }
                     connman->PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                 }
             }
         }
         // If we're in IBD, we want outbound peers that will serve us a useful
         // chain. Disconnect peers that are on chains with insufficient work.
         if (::ChainstateActive().IsInitialBlockDownload() &&
             nCount != MAX_HEADERS_RESULTS) {
             // When nCount < MAX_HEADERS_RESULTS, we know we have no more
             // headers to fetch from this peer.
             if (nodestate->pindexBestKnownBlock &&
                 nodestate->pindexBestKnownBlock->nChainWork <
                     nMinimumChainWork) {
                 // This peer has too little work on their headers chain to help
                 // us sync -- disconnect if using an outbound slot (unless
                 // whitelisted or addnode).
                 // Note: We compare their tip to nMinimumChainWork (rather than
                 // ::ChainActive().Tip()) because we won't start block download
                 // until we have a headers chain that has at least
                 // nMinimumChainWork, even if a peer has a chain past our tip,
                 // as an anti-DoS measure.
                 if (IsOutboundDisconnectionCandidate(pfrom)) {
                     LogPrintf("Disconnecting outbound peer %d -- headers "
                               "chain has insufficient work\n",
                               pfrom->GetId());
                     pfrom->fDisconnect = true;
                 }
             }
         }
 
         if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) &&
             nodestate->pindexBestKnownBlock != nullptr &&
             pfrom->m_tx_relay != nullptr) {
             // If this is an outbound full-relay peer, check to see if we should
             // protect it from the bad/lagging chain logic. Note that
             // block-relay-only peers are already implicitly protected, so we
             // only consider setting m_protect for the full-relay peers.
             if (g_outbound_peers_with_protect_from_disconnect <
                     MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT &&
                 nodestate->pindexBestKnownBlock->nChainWork >=
                     ::ChainActive().Tip()->nChainWork &&
                 !nodestate->m_chain_sync.m_protect) {
                 LogPrint(BCLog::NET,
                          "Protecting outbound peer=%d from eviction\n",
                          pfrom->GetId());
                 nodestate->m_chain_sync.m_protect = true;
                 ++g_outbound_peers_with_protect_from_disconnect;
             }
         }
     }
 
     return true;
 }
 
 void static ProcessOrphanTx(const Config &config, CConnman *connman,
                             std::set<TxId> &orphan_work_set)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) {
     AssertLockHeld(cs_main);
     AssertLockHeld(g_cs_orphans);
     std::unordered_map<NodeId, uint32_t> rejectCountPerNode;
     bool done = false;
     while (!done && !orphan_work_set.empty()) {
         const TxId orphanTxId = *orphan_work_set.begin();
         orphan_work_set.erase(orphan_work_set.begin());
 
         auto orphan_it = mapOrphanTransactions.find(orphanTxId);
         if (orphan_it == mapOrphanTransactions.end()) {
             continue;
         }
 
         const CTransactionRef porphanTx = orphan_it->second.tx;
         const CTransaction &orphanTx = *porphanTx;
         NodeId fromPeer = orphan_it->second.fromPeer;
         bool fMissingInputs2 = false;
         // Use a new CValidationState because orphans come from different peers
         // (and we call MaybePunishNode based on the source peer from the orphan
         // map, not based on the peer that relayed the previous transaction).
         CValidationState orphan_state;
 
         auto it = rejectCountPerNode.find(fromPeer);
         if (it != rejectCountPerNode.end() &&
             it->second > MAX_NON_STANDARD_ORPHAN_PER_NODE) {
             continue;
         }
 
         if (AcceptToMemoryPool(config, g_mempool, orphan_state, porphanTx,
                                &fMissingInputs2, false /* bypass_limits */,
                                Amount::zero() /* nAbsurdFee */)) {
             LogPrint(BCLog::MEMPOOL, "   accepted orphan tx %s\n",
                      orphanTxId.ToString());
             RelayTransaction(orphanTxId, *connman);
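              // This orphan's outputs may themselves be spent by other
              // orphans; queue those dependents for reconsideration now that
              // their parent is in the mempool.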
             for (size_t i = 0; i < orphanTx.vout.size(); i++) {
                 auto it_by_prev =
                     mapOrphanTransactionsByPrev.find(COutPoint(orphanTxId, i));
                 if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
                     for (const auto &elem : it_by_prev->second) {
                         orphan_work_set.insert(elem->first);
                     }
                 }
             }
             EraseOrphanTx(orphanTxId);
             done = true;
          } else if (!fMissingInputs2) {
              // Count this rejected orphan so that the
              // MAX_NON_STANDARD_ORPHAN_PER_NODE cap above takes effect for
              // this peer.
              rejectCountPerNode[fromPeer]++;
             if (orphan_state.IsInvalid()) {
                 // Punish peer that gave us an invalid orphan tx
                 MaybePunishNode(fromPeer, orphan_state,
                                 /*via_compact_block*/ false);
                 LogPrint(BCLog::MEMPOOL, "   invalid orphan tx %s\n",
                          orphanTxId.ToString());
             }
             // Has inputs but not accepted to mempool
             // Probably non-standard or insufficient fee
             LogPrint(BCLog::MEMPOOL, "   removed orphan tx %s\n",
                      orphanTxId.ToString());
             assert(IsTransactionReason(orphan_state.GetReason()));
 
             assert(recentRejects);
             recentRejects->insert(orphanTxId);
 
             EraseOrphanTx(orphanTxId);
             done = true;
         }
         g_mempool.check(pcoinsTip.get());
     }
 }
 
 static bool ProcessMessage(const Config &config, CNode *pfrom,
                            const std::string &strCommand, CDataStream &vRecv,
                            int64_t nTimeReceived, CConnman *connman,
                            BanMan *banman,
                            const std::atomic<bool> &interruptMsgProc,
                            bool enable_bip61) {
     const CChainParams &chainparams = config.GetChainParams();
     LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n",
              SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
     if (gArgs.IsArgSet("-dropmessagestest") &&
         GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0) {
         LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
         return true;
     }
 
     if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
         (strCommand == NetMsgType::FILTERLOAD ||
          strCommand == NetMsgType::FILTERADD)) {
         if (pfrom->nVersion >= NO_BLOOM_VERSION) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "no-bloom-version");
             return false;
         } else {
             pfrom->fDisconnect = true;
             return false;
         }
     }
 
     if (strCommand == NetMsgType::REJECT) {
         if (LogAcceptCategory(BCLog::NET)) {
             try {
                 std::string strMsg;
                 uint8_t ccode;
                 std::string strReason;
                 vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >>
                     ccode >>
                     LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
 
                 std::ostringstream ss;
                 ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
 
                 if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX) {
                     uint256 hash;
                     vRecv >> hash;
                     ss << ": hash " << hash.ToString();
                 }
                 LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
             } catch (const std::ios_base::failure &) {
                 // Avoid feedback loops by preventing reject messages from
                 // triggering a new reject message.
                 LogPrint(BCLog::NET, "Unparseable reject message received\n");
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::VERSION) {
         // Each connection can only send one version message
         if (pfrom->nVersion != 0) {
             if (enable_bip61) {
                 connman->PushMessage(
                     pfrom,
                     CNetMsgMaker(INIT_PROTO_VERSION)
                         .Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE,
                               std::string("Duplicate version message")));
             }
             LOCK(cs_main);
             Misbehaving(pfrom, 1, "multiple-version");
             return false;
         }
 
         int64_t nTime;
         CAddress addrMe;
         CAddress addrFrom;
         uint64_t nNonce = 1;
         uint64_t nServiceInt;
         ServiceFlags nServices;
         int nVersion;
         int nSendVersion;
         std::string cleanSubVer;
         int nStartingHeight = -1;
         bool fRelay = true;
 
         vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
         nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
         nServices = ServiceFlags(nServiceInt);
         if (!pfrom->fInbound) {
             connman->SetServices(pfrom->addr, nServices);
         }
         if (!pfrom->fInbound && !pfrom->fFeeler &&
             !pfrom->m_manual_connection &&
             !HasAllDesirableServiceFlags(nServices)) {
             LogPrint(BCLog::NET,
                      "peer=%d does not offer the expected services "
                      "(%08x offered, %08x expected); disconnecting\n",
                      pfrom->GetId(), nServices,
                      GetDesirableServiceFlags(nServices));
             if (enable_bip61) {
                 connman->PushMessage(
                     pfrom,
                     CNetMsgMaker(INIT_PROTO_VERSION)
                         .Make(NetMsgType::REJECT, strCommand,
                               REJECT_NONSTANDARD,
                               strprintf("Expected to offer services %08x",
                                         GetDesirableServiceFlags(nServices))));
             }
             pfrom->fDisconnect = true;
             return false;
         }
 
         if (nVersion < MIN_PEER_PROTO_VERSION) {
             // disconnect from peers older than this proto version
             LogPrint(BCLog::NET,
                      "peer=%d using obsolete version %i; disconnecting\n",
                      pfrom->GetId(), nVersion);
             if (enable_bip61) {
                 connman->PushMessage(
                     pfrom,
                     CNetMsgMaker(INIT_PROTO_VERSION)
                         .Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
                               strprintf("Version must be %d or greater",
                                         MIN_PEER_PROTO_VERSION)));
             }
             pfrom->fDisconnect = true;
             return false;
         }
 
         if (!vRecv.empty()) {
             vRecv >> addrFrom >> nNonce;
         }
         if (!vRecv.empty()) {
             std::string strSubVer;
             vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
             cleanSubVer = SanitizeString(strSubVer);
         }
         if (!vRecv.empty()) {
             vRecv >> nStartingHeight;
         }
         if (!vRecv.empty()) {
             vRecv >> fRelay;
         }
          // Disconnect if we connected to ourselves
         if (pfrom->fInbound && !connman->CheckIncomingNonce(nNonce)) {
             LogPrintf("connected to self at %s, disconnecting\n",
                       pfrom->addr.ToString());
             pfrom->fDisconnect = true;
             return true;
         }
 
         if (pfrom->fInbound && addrMe.IsRoutable()) {
             SeenLocal(addrMe);
         }
 
         // Be shy and don't send version until we hear
         if (pfrom->fInbound) {
             PushNodeVersion(config, pfrom, connman, GetAdjustedTime());
         }
 
         connman->PushMessage(
             pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
 
         pfrom->nServices = nServices;
         pfrom->SetAddrLocal(addrMe);
         {
             LOCK(pfrom->cs_SubVer);
             pfrom->cleanSubVer = cleanSubVer;
         }
         pfrom->nStartingHeight = nStartingHeight;
 
          // Set nodes that relay neither blocks nor txs, and do not serve
          // (parts of) the historical blockchain, as "clients".
         pfrom->fClient = (!(nServices & NODE_NETWORK) &&
                           !(nServices & NODE_NETWORK_LIMITED));
 
         // set nodes not capable of serving the complete blockchain history as
         // "limited nodes"
         pfrom->m_limited_node =
             (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED));
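          // In short: NODE_NETWORK => full history; NODE_NETWORK_LIMITED
          // alone => limited node; neither => client.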
 
         if (pfrom->m_tx_relay != nullptr) {
             LOCK(pfrom->m_tx_relay->cs_filter);
              // If fRelay is false, this is flipped to true once we receive
              // the first filter* message.
             pfrom->m_tx_relay->fRelayTxes = fRelay;
         }
 
         // Change version
         pfrom->SetSendVersion(nSendVersion);
         pfrom->nVersion = nVersion;
 
         // Potentially mark this peer as a preferred download peer.
         {
             LOCK(cs_main);
             UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
         }
 
         if (!pfrom->fInbound && pfrom->IsAddrRelayPeer()) {
             // Advertise our address
             if (fListen && !::ChainstateActive().IsInitialBlockDownload()) {
                 CAddress addr =
                     GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
                 FastRandomContext insecure_rand;
                 if (addr.IsRoutable()) {
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     pfrom->PushAddress(addr, insecure_rand);
                 } else if (IsPeerAddrLocalGood(pfrom)) {
                     addr.SetIP(addrMe);
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     pfrom->PushAddress(addr, insecure_rand);
                 }
             }
 
             // Get recent addresses
             if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION ||
                 connman->GetAddressCount() < 1000) {
                 connman->PushMessage(
                     pfrom,
                     CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
                 pfrom->fGetAddr = true;
             }
             connman->MarkAddressGood(pfrom->addr);
         }
 
         std::string remoteAddr;
         if (fLogIPs) {
             remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
         }
 
         LogPrint(BCLog::NET,
                  "receive version message: [%s] %s: version %d, blocks=%d, "
                  "us=%s, peer=%d%s\n",
                  pfrom->addr.ToString(), cleanSubVer, pfrom->nVersion,
                  pfrom->nStartingHeight, addrMe.ToString(), pfrom->GetId(),
                  remoteAddr);
 
         // Ignore time offsets that are improbable (before the Genesis block)
         // and may underflow the nTimeOffset calculation.
         int64_t currentTime = GetTime();
         if (nTime >= int64_t(chainparams.GenesisBlock().nTime)) {
             int64_t nTimeOffset = nTime - currentTime;
             pfrom->nTimeOffset = nTimeOffset;
             AddTimeData(pfrom->addr, nTimeOffset);
         } else {
             LOCK(cs_main);
             Misbehaving(pfrom, 20,
                         "Ignoring invalid timestamp in version message");
         }
 
      // Feeler connections exist only to verify whether an address is online.
         if (pfrom->fFeeler) {
             assert(pfrom->fInbound == false);
             pfrom->fDisconnect = true;
         }
         return true;
     }
 
     if (pfrom->nVersion == 0) {
         // Must have a version message before anything else
         LOCK(cs_main);
         Misbehaving(pfrom, 10, "missing-version");
         return false;
     }
 
     // At this point, the outgoing message serialization version can't change.
     const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
 
     if (strCommand == NetMsgType::VERACK) {
         pfrom->SetRecvVersion(
             std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
 
         if (!pfrom->fInbound) {
             // Mark this node as currently connected, so we update its timestamp
             // later.
             LOCK(cs_main);
             State(pfrom->GetId())->fCurrentlyConnected = true;
             LogPrintf(
                 "New outbound peer connected: version: %d, blocks=%d, "
                 "peer=%d%s (%s)\n",
                 pfrom->nVersion.load(), pfrom->nStartingHeight, pfrom->GetId(),
                 (fLogIPs ? strprintf(", peeraddr=%s", pfrom->addr.ToString())
                          : ""),
                 pfrom->m_tx_relay == nullptr ? "block-relay" : "full-relay");
         }
 
         if (pfrom->nVersion >= SENDHEADERS_VERSION) {
             // Tell our peer we prefer to receive headers rather than inv's
             // We send this to non-NODE NETWORK peers as well, because even
             // non-NODE NETWORK peers can announce blocks (such as pruning
             // nodes)
             connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
         }
         if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
             // Tell our peer we are willing to provide version 1 or 2
             // cmpctblocks. However, we do not request new block announcements
             // using cmpctblock messages. We send this to non-NODE NETWORK peers
             // as well, because they may wish to request compact blocks from us.
             bool fAnnounceUsingCMPCTBLOCK = false;
             uint64_t nCMPCTBLOCKVersion = 1;
             connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT,
                                                       fAnnounceUsingCMPCTBLOCK,
                                                       nCMPCTBLOCKVersion));
         }
         pfrom->fSuccessfullyConnected = true;
         return true;
     }
 
     if (!pfrom->fSuccessfullyConnected) {
         // Must have a verack message before anything else
         LOCK(cs_main);
         Misbehaving(pfrom, 10, "missing-verack");
         return false;
     }
 
     if (strCommand == NetMsgType::ADDR) {
         std::vector<CAddress> vAddr;
         vRecv >> vAddr;
 
         // Don't want addr from older versions unless seeding
         if (pfrom->nVersion < CADDR_TIME_VERSION &&
             connman->GetAddressCount() > 1000) {
             return true;
         }
         if (!pfrom->IsAddrRelayPeer()) {
             return true;
         }
         if (vAddr.size() > 1000) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "oversized-addr");
             return error("message addr size() = %u", vAddr.size());
         }
 
         // Store the new addresses
         std::vector<CAddress> vAddrOk;
         int64_t nNow = GetAdjustedTime();
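         // Addresses with a timestamp within the last 10 minutes are
         // considered fresh enough to be relayed further below.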
         int64_t nSince = nNow - 10 * 60;
         for (CAddress &addr : vAddr) {
             if (interruptMsgProc) {
                 return true;
             }
 
             // We only bother storing full nodes, though this may include things
             // which we would not make an outbound connection to, in part
             // because we may make feeler connections to them.
             if (!MayHaveUsefulAddressDB(addr.nServices) &&
                 !HasAllDesirableServiceFlags(addr.nServices)) {
                 continue;
             }
 
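             // An implausibly old timestamp (before 1973) or one more than 10
             // minutes in the future is bogus; overwrite it with "5 days ago"
             // so the address is stored but not relayed as fresh.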
             if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) {
                 addr.nTime = nNow - 5 * 24 * 60 * 60;
             }
             pfrom->AddAddressKnown(addr);
             if (banman->IsBanned(addr)) {
                 // Do not process banned addresses beyond remembering
                 // we received them
                 continue;
             }
             bool fReachable = IsReachable(addr);
             if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 &&
                 addr.IsRoutable()) {
                 // Relay to a limited number of other nodes
                 RelayAddress(addr, fReachable, connman);
             }
             // Do not store addresses outside our network
             if (fReachable) {
                 vAddrOk.push_back(addr);
             }
         }
 
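         // Hand the verified addresses to AddrMan, penalising their
         // timestamps by 2 hours since they reached us second-hand.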
         connman->AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
         if (vAddr.size() < 1000) {
             pfrom->fGetAddr = false;
         }
         if (pfrom->fOneShot) {
             pfrom->fDisconnect = true;
         }
         return true;
     }
 
     if (strCommand == NetMsgType::SENDHEADERS) {
         LOCK(cs_main);
         State(pfrom->GetId())->fPreferHeaders = true;
         return true;
     }
 
     if (strCommand == NetMsgType::SENDCMPCT) {
         bool fAnnounceUsingCMPCTBLOCK = false;
         uint64_t nCMPCTBLOCKVersion = 0;
         vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
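         // Only version 1 compact blocks are supported here; a sendcmpct
         // carrying any other version is ignored.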
         if (nCMPCTBLOCKVersion == 1) {
             LOCK(cs_main);
             // fProvidesHeaderAndIDs is used to "lock in" version of compact
             // blocks we send.
             if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
                 State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
             }
 
             State(pfrom->GetId())->fPreferHeaderAndIDs =
                 fAnnounceUsingCMPCTBLOCK;
             if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
                 State(pfrom->GetId())->fSupportsDesiredCmpctVersion = true;
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::INV) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "oversized-inv");
             return error("message inv size() = %u", vInv.size());
         }
 
         // We won't accept tx inv's if we're in blocks-only mode, or this is a
         // block-relay-only peer
         bool fBlocksOnly = !g_relay_txes || (pfrom->m_tx_relay == nullptr);
 
         // Allow whitelisted peers to send data other than blocks in blocks only
         // mode if whitelistrelay is true
         if (pfrom->HasPermission(PF_RELAY)) {
             fBlocksOnly = false;
         }
 
         LOCK(cs_main);
 
         const auto current_time = GetTime<std::chrono::microseconds>();
 
         for (CInv &inv : vInv) {
             if (interruptMsgProc) {
                 return true;
             }
 
             bool fAlreadyHave = AlreadyHave(inv);
             LogPrint(BCLog::NET, "got inv: %s  %s peer=%d\n", inv.ToString(),
                      fAlreadyHave ? "have" : "new", pfrom->GetId());
 
             if (inv.type == MSG_BLOCK) {
                 const BlockHash hash(inv.hash);
                 UpdateBlockAvailability(pfrom->GetId(), hash);
                 if (!fAlreadyHave && !fImporting && !fReindex &&
                     !mapBlocksInFlight.count(hash)) {
                     // We used to request the full block here, but since
                     // headers-announcements are now the primary method of
                     // announcement on the network, and since, in the case that
                     // a node fell back to inv we probably have a reorg which we
                     // should get the headers for first, we now only provide a
                     // getheaders response here. When we receive the headers, we
                     // will then ask for the blocks we need.
                     connman->PushMessage(
                         pfrom, msgMaker.Make(
                                    NetMsgType::GETHEADERS,
                                    ::ChainActive().GetLocator(pindexBestHeader),
                                    hash));
                     LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
                              pindexBestHeader->nHeight, hash.ToString(),
                              pfrom->GetId());
                 }
             } else {
                 pfrom->AddInventoryKnown(inv);
                 if (fBlocksOnly) {
                     LogPrint(BCLog::NET,
                              "transaction (%s) inv sent in violation of "
                              "protocol, disconnecting peer=%d\n",
                              inv.hash.ToString(), pfrom->GetId());
                     pfrom->fDisconnect = true;
                     return true;
                 } else if (!fAlreadyHave && !fImporting && !fReindex &&
                            !::ChainstateActive().IsInitialBlockDownload()) {
                     RequestTx(State(pfrom->GetId()), TxId(inv.hash),
                               current_time);
                 }
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::GETDATA) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "too-many-inv");
             return error("message getdata size() = %u", vInv.size());
         }
 
         LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
                  vInv.size(), pfrom->GetId());
 
         if (vInv.size() > 0) {
             LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
                      vInv[0].ToString(), pfrom->GetId());
         }
 
         pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(),
                                    vInv.end());
         ProcessGetData(config, pfrom, connman, interruptMsgProc);
         return true;
     }
 
     if (strCommand == NetMsgType::GETBLOCKS) {
         CBlockLocator locator;
         uint256 hashStop;
         vRecv >> locator >> hashStop;
 
         if (locator.vHave.size() > MAX_LOCATOR_SZ) {
             LogPrint(BCLog::NET,
                      "getblocks locator size %lld > %d, disconnect peer=%d\n",
                      locator.vHave.size(), MAX_LOCATOR_SZ, pfrom->GetId());
             pfrom->fDisconnect = true;
             return true;
         }
 
         // We might have announced the currently-being-connected tip using a
         // compact block, which resulted in the peer sending a getblocks
         // request, which we would otherwise respond to without the new block.
         // To avoid this situation we simply verify that we are on our best
         // known chain now. This is super overkill, but we handle it better
         // for getheaders requests, and there are no known nodes which support
         // compact blocks but still use getblocks to request blocks.
         {
             std::shared_ptr<const CBlock> a_recent_block;
             {
                 LOCK(cs_most_recent_block);
                 a_recent_block = most_recent_block;
             }
             CValidationState state;
             if (!ActivateBestChain(config, state, a_recent_block)) {
                 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                          FormatStateMessage(state));
             }
         }
 
         LOCK(cs_main);
 
         // Find the last block the caller has in the main chain
         const CBlockIndex *pindex =
             FindForkInGlobalIndex(::ChainActive(), locator);
 
         // Send the rest of the chain
         if (pindex) {
             pindex = ::ChainActive().Next(pindex);
         }
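         // At most 500 block invs are sent per getblocks request; the
         // batching comment below explains how the peer fetches the rest.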
         int nLimit = 500;
         LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
                  pfrom->GetId());
         for (; pindex; pindex = ::ChainActive().Next(pindex)) {
             if (pindex->GetBlockHash() == hashStop) {
                 LogPrint(BCLog::NET, "  getblocks stopping at %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             // If pruning, don't inv blocks unless we have on disk and are
             // likely to still have for some reasonable time window (1 hour)
             // that block relay might require.
             const int nPrunedBlocksLikelyToHave =
                 MIN_BLOCKS_TO_KEEP -
                 3600 / chainparams.GetConsensus().nPowTargetSpacing;
             if (fPruneMode &&
                 (!pindex->nStatus.hasData() ||
                  pindex->nHeight <= ::ChainActive().Tip()->nHeight -
                                         nPrunedBlocksLikelyToHave)) {
                 LogPrint(
                     BCLog::NET,
                     " getblocks stopping, pruned or too old block at %d %s\n",
                     pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
             if (--nLimit <= 0) {
                 // When this block is requested, we'll send an inv that'll
                 // trigger the peer to getblocks the next batch of inventory.
                 LogPrint(BCLog::NET, "  getblocks stopping at limit %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 pfrom->hashContinue = pindex->GetBlockHash();
                 break;
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::GETBLOCKTXN) {
         BlockTransactionsRequest req;
         vRecv >> req;
 
         std::shared_ptr<const CBlock> recent_block;
         {
             LOCK(cs_most_recent_block);
             if (most_recent_block_hash == req.blockhash) {
                 recent_block = most_recent_block;
             }
             // Unlock cs_most_recent_block to avoid cs_main lock inversion
         }
         if (recent_block) {
             SendBlockTransactions(*recent_block, req, pfrom, connman);
             return true;
         }
 
         LOCK(cs_main);
 
         const CBlockIndex *pindex = LookupBlockIndex(req.blockhash);
         if (!pindex || !pindex->nStatus.hasData()) {
             LogPrint(
                 BCLog::NET,
                 "Peer %d sent us a getblocktxn for a block we don't have\n",
                 pfrom->GetId());
             return true;
         }
 
         if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) {
             // If an older block is requested (should never happen in practice,
             // but can happen in tests) send a block response instead of a
             // blocktxn response. Sending a full block response instead of a
             // small blocktxn response is preferable in the case where a peer
             // might maliciously send lots of getblocktxn requests to trigger
             // expensive disk reads, because it will require the peer to
             // actually receive all the data read from disk over the network.
             LogPrint(BCLog::NET,
                      "Peer %d sent us a getblocktxn for a block > %i deep\n",
                      pfrom->GetId(), MAX_BLOCKTXN_DEPTH);
             CInv inv;
             inv.type = MSG_BLOCK;
             inv.hash = req.blockhash;
             pfrom->vRecvGetData.push_back(inv);
             // The message processing loop will go around again (without
             // pausing) and we'll respond then (without cs_main)
             return true;
         }
 
         CBlock block;
         bool ret = ReadBlockFromDisk(block, pindex, chainparams.GetConsensus());
         assert(ret);
 
         SendBlockTransactions(block, req, pfrom, connman);
         return true;
     }
 
     if (strCommand == NetMsgType::GETHEADERS) {
         CBlockLocator locator;
         BlockHash hashStop;
         vRecv >> locator >> hashStop;
 
         if (locator.vHave.size() > MAX_LOCATOR_SZ) {
             LogPrint(BCLog::NET,
                      "getheaders locator size %lld > %d, disconnect peer=%d\n",
                      locator.vHave.size(), MAX_LOCATOR_SZ, pfrom->GetId());
             pfrom->fDisconnect = true;
             return true;
         }
 
         LOCK(cs_main);
         if (::ChainstateActive().IsInitialBlockDownload() &&
             !pfrom->HasPermission(PF_NOBAN)) {
             LogPrint(BCLog::NET,
                      "Ignoring getheaders from peer=%d because node is in "
                      "initial block download\n",
                      pfrom->GetId());
             return true;
         }
 
         CNodeState *nodestate = State(pfrom->GetId());
         const CBlockIndex *pindex = nullptr;
         if (locator.IsNull()) {
             // If locator is null, return the hashStop block
             pindex = LookupBlockIndex(hashStop);
             if (!pindex) {
                 return true;
             }
 
             if (!BlockRequestAllowed(pindex, chainparams.GetConsensus())) {
                 LogPrint(BCLog::NET,
                          "%s: ignoring request from peer=%i for old block "
                          "header that isn't in the main chain\n",
                          __func__, pfrom->GetId());
                 return true;
             }
         } else {
             // Find the last block the caller has in the main chain
             pindex = FindForkInGlobalIndex(::ChainActive(), locator);
             if (pindex) {
                 pindex = ::ChainActive().Next(pindex);
             }
         }
 
         // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
         // count at the end
         std::vector<CBlock> vHeaders;
         int nLimit = MAX_HEADERS_RESULTS;
         LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(),
                  pfrom->GetId());
         for (; pindex; pindex = ::ChainActive().Next(pindex)) {
             vHeaders.push_back(pindex->GetBlockHeader());
             if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
                 break;
             }
         }
         // pindex can be nullptr either if we sent ::ChainActive().Tip() OR
         // if our peer has ::ChainActive().Tip() (and thus we are sending an
         // empty headers message). In both cases it's safe to update
         // pindexBestHeaderSent to be our tip.
         //
         // It is important that we simply reset the BestHeaderSent value here,
         // and not max(BestHeaderSent, newHeaderSent). We might have announced
         // the currently-being-connected tip using a compact block, which
         // resulted in the peer sending a headers request, which we respond to
         // without the new block. By resetting the BestHeaderSent, we ensure we
         // will re-announce the new block via headers (or compact blocks again)
         // in the SendMessages logic.
         nodestate->pindexBestHeaderSent =
             pindex ? pindex : ::ChainActive().Tip();
         connman->PushMessage(pfrom,
                              msgMaker.Make(NetMsgType::HEADERS, vHeaders));
         return true;
     }
 
     if (strCommand == NetMsgType::TX) {
         // Stop processing the transaction early if we are in blocks-only
         // mode and the peer is not whitelisted for relay (whitelistrelay),
         // or if this peer is supposed to be a block-relay-only peer.
         if ((!g_relay_txes && !pfrom->HasPermission(PF_RELAY)) ||
             (pfrom->m_tx_relay == nullptr)) {
             LogPrint(BCLog::NET,
                      "transaction sent in violation of protocol peer=%d\n",
                      pfrom->GetId());
             pfrom->fDisconnect = true;
             return true;
         }
 
         CTransactionRef ptx;
         vRecv >> ptx;
         const CTransaction &tx = *ptx;
         const TxId &txid = tx.GetId();
 
         CInv inv(MSG_TX, txid);
         pfrom->AddInventoryKnown(inv);
 
         LOCK2(cs_main, g_cs_orphans);
 
         bool fMissingInputs = false;
         CValidationState state;
 
         CNodeState *nodestate = State(pfrom->GetId());
         nodestate->m_tx_download.m_tx_announced.erase(txid);
         nodestate->m_tx_download.m_tx_in_flight.erase(txid);
         EraseTxRequest(txid);
 
         if (!AlreadyHave(inv) &&
             AcceptToMemoryPool(config, g_mempool, state, ptx, &fMissingInputs,
                                false /* bypass_limits */,
                                Amount::zero() /* nAbsurdFee */)) {
             g_mempool.check(pcoinsTip.get());
             RelayTransaction(tx.GetId(), *connman);
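             // Queue any orphans that spend this transaction's outputs into
             // the peer's work set so they are reconsidered below.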
             for (size_t i = 0; i < tx.vout.size(); i++) {
                 auto it_by_prev =
                     mapOrphanTransactionsByPrev.find(COutPoint(txid, i));
                 if (it_by_prev != mapOrphanTransactionsByPrev.end()) {
                     for (const auto &elem : it_by_prev->second) {
                         pfrom->orphan_work_set.insert(elem->first);
                     }
                 }
             }
 
             pfrom->nLastTXTime = GetTime();
 
             LogPrint(BCLog::MEMPOOL,
                      "AcceptToMemoryPool: peer=%d: accepted %s "
                      "(poolsz %u txn, %u kB)\n",
                      pfrom->GetId(), tx.GetId().ToString(), g_mempool.size(),
                      g_mempool.DynamicMemoryUsage() / 1000);
 
             // Recursively process any orphan transactions that depended on this
             // one
             ProcessOrphanTx(config, connman, pfrom->orphan_work_set);
         } else if (fMissingInputs) {
             // It may be the case that the orphan's parents have all been
             // rejected.
             bool fRejectedParents = false;
             for (const CTxIn &txin : tx.vin) {
                 if (recentRejects->contains(txin.prevout.GetTxId())) {
                     fRejectedParents = true;
                     break;
                 }
             }
             if (!fRejectedParents) {
                 const auto current_time = GetTime<std::chrono::microseconds>();
 
                 for (const CTxIn &txin : tx.vin) {
                     // FIXME: MSG_TX should use a TxHash, not a TxId.
                     const TxId _txid = txin.prevout.GetTxId();
                     CInv _inv(MSG_TX, _txid);
                     pfrom->AddInventoryKnown(_inv);
                     if (!AlreadyHave(_inv)) {
                         RequestTx(State(pfrom->GetId()), _txid, current_time);
                     }
                 }
                 AddOrphanTx(ptx, pfrom->GetId());
 
                 // DoS prevention: do not allow mapOrphanTransactions to grow
                 // unbounded
                 unsigned int nMaxOrphanTx = (unsigned int)std::max(
                     int64_t(0), gArgs.GetArg("-maxorphantx",
                                              DEFAULT_MAX_ORPHAN_TRANSACTIONS));
                 unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
                 if (nEvicted > 0) {
                     LogPrint(BCLog::MEMPOOL,
                              "mapOrphan overflow, removed %u tx\n", nEvicted);
                 }
             } else {
                 LogPrint(BCLog::MEMPOOL,
                          "not keeping orphan with rejected parents %s\n",
                          tx.GetId().ToString());
                 // We will continue to reject this tx since it has rejected
                 // parents so avoid re-requesting it from other peers.
                 recentRejects->insert(tx.GetId());
             }
         } else {
             assert(IsTransactionReason(state.GetReason()));
 
             assert(recentRejects);
             recentRejects->insert(tx.GetId());
 
             if (RecursiveDynamicUsage(*ptx) < 100000) {
                 AddToCompactExtraTransactions(ptx);
             }
 
             if (pfrom->HasPermission(PF_FORCERELAY)) {
                 // Always relay transactions received from whitelisted peers,
                 // even if they were already in the mempool or rejected from it
                 // due to policy, allowing the node to function as a gateway for
                 // nodes hidden behind it.
                 //
                 // Never relay transactions that might result in being
                 // disconnected (or banned).
                 if (state.IsInvalid() && TxRelayMayResultInDisconnect(state)) {
                     LogPrintf("Not relaying invalid transaction %s from "
                               "whitelisted peer=%d (%s)\n",
                               tx.GetId().ToString(), pfrom->GetId(),
                               FormatStateMessage(state));
                 } else {
                     LogPrintf("Force relaying tx %s from whitelisted peer=%d\n",
                               tx.GetId().ToString(), pfrom->GetId());
                     RelayTransaction(tx.GetId(), *connman);
                 }
             }
         }
 
         // If a tx has been detected by recentRejects, we will have reached
         // this point and the tx will have been ignored. Because we haven't run
         // the tx through AcceptToMemoryPool, we won't have computed a DoS
         // score for it or determined exactly why we consider it invalid.
         //
         // This means we won't penalize any peer subsequently relaying a DoSy
         // tx (even if we penalized the first peer who gave it to us) because
         // we have to account for recentRejects showing false positives. In
         // other words, we shouldn't penalize a peer if we aren't *sure* they
         // submitted a DoSy tx.
         //
         // Note that recentRejects doesn't just record DoSy or invalid
         // transactions, but any tx not accepted by the mempool, which may be
         // due to node policy (vs. consensus). So we can't blanket penalize a
         // peer simply for relaying a tx that our recentRejects has caught,
         // regardless of false positives.
 
         if (state.IsInvalid()) {
             LogPrint(BCLog::MEMPOOLREJ,
                      "%s from peer=%d was not accepted: %s\n",
                      tx.GetHash().ToString(), pfrom->GetId(),
                      FormatStateMessage(state));
             // Never send AcceptToMemoryPool's internal codes over P2P
             if (enable_bip61 && state.GetRejectCode() > 0 &&
                 state.GetRejectCode() < REJECT_INTERNAL) {
                 connman->PushMessage(
                     pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand,
                                          uint8_t(state.GetRejectCode()),
                                          state.GetRejectReason().substr(
                                              0, MAX_REJECT_MESSAGE_LENGTH),
                                          inv.hash));
             }
             MaybePunishNode(pfrom->GetId(), state, /*via_compact_block*/ false);
         }
         return true;
     }
 
     if (strCommand == NetMsgType::CMPCTBLOCK) {
         // Ignore cmpctblock received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected cmpctblock message received from peer %d\n",
                      pfrom->GetId());
             return true;
         }
 
         CBlockHeaderAndShortTxIDs cmpctblock;
         vRecv >> cmpctblock;
 
         bool received_new_header = false;
 
         {
             LOCK(cs_main);
 
             if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) {
                 // Doesn't connect (or is genesis), instead of DoSing in
                 // AcceptBlockHeader, request deeper headers
                 if (!::ChainstateActive().IsInitialBlockDownload()) {
                     connman->PushMessage(
                         pfrom, msgMaker.Make(
                                    NetMsgType::GETHEADERS,
                                    ::ChainActive().GetLocator(pindexBestHeader),
                                    uint256()));
                 }
                 return true;
             }
 
             if (!LookupBlockIndex(cmpctblock.header.GetHash())) {
                 received_new_header = true;
             }
         }
 
         const CBlockIndex *pindex = nullptr;
         CValidationState state;
         if (!ProcessNewBlockHeaders(config, {cmpctblock.header}, state,
                                     &pindex)) {
             if (state.IsInvalid()) {
                 MaybePunishNode(pfrom->GetId(), state,
                                 /*via_compact_block*/ true,
                                 "invalid header via cmpctblock");
                 return true;
             }
         }
 
         // When we succeed in decoding a block's txids from a cmpctblock
         // message we typically jump to the BLOCKTXN handling code, with a
         // dummy (empty) BLOCKTXN message, to re-use the logic there in
         // completing processing of the putative block (without cs_main).
         bool fProcessBLOCKTXN = false;
         CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
 
         // If we end up treating this as a plain headers message, call that
         // as well, without cs_main.
         bool fRevertToHeaderProcessing = false;
 
         // Keep a CBlock for "optimistic" compactblock reconstructions (see
         // below)
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockReconstructed = false;
 
         {
             LOCK2(cs_main, g_cs_orphans);
             // If AcceptBlockHeader returned true, it set pindex
             assert(pindex);
             UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
 
             CNodeState *nodestate = State(pfrom->GetId());
 
             // If this was a new header with more work than our tip, update the
             // peer's last block announcement time
             if (received_new_header &&
                 pindex->nChainWork > ::ChainActive().Tip()->nChainWork) {
                 nodestate->m_last_block_announcement = GetTime();
             }
 
             std::map<BlockHash,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator blockInFlightIt =
                     mapBlocksInFlight.find(pindex->GetBlockHash());
             bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
 
             if (pindex->nStatus.hasData()) {
                 // Nothing to do here
                 return true;
             }
 
             if (pindex->nChainWork <=
                     ::ChainActive()
                         .Tip()
                         ->nChainWork || // We know something better
                 pindex->nTx != 0) {
                 // We had this block at some point, but pruned it
                 if (fAlreadyInFlight) {
                     // We requested this block for some reason, but our mempool
                     // will probably be useless so we just grab the block via
                     // normal getdata.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                     connman->PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                 }
                 return true;
             }
 
             // If we're not close to tip yet, give up and let parallel block
             // fetch work its magic.
             if (!fAlreadyInFlight &&
                 !CanDirectFetch(chainparams.GetConsensus())) {
                 return true;
             }
 
             // We want to be a bit conservative just to be extra careful about
             // DoS possibilities in compact block processing...
             if (pindex->nHeight <= ::ChainActive().Height() + 2) {
                 if ((!fAlreadyInFlight && nodestate->nBlocksInFlight <
                                               MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                     (fAlreadyInFlight &&
                      blockInFlightIt->second.first == pfrom->GetId())) {
                     std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
                     if (!MarkBlockAsInFlight(config, pfrom->GetId(),
                                              pindex->GetBlockHash(),
                                              chainparams.GetConsensus(), pindex,
                                              &queuedBlockIt)) {
                         if (!(*queuedBlockIt)->partialBlock) {
                             (*queuedBlockIt)
                                 ->partialBlock.reset(
                                     new PartiallyDownloadedBlock(config,
                                                                  &g_mempool));
                         } else {
                             // The block was already in flight using compact
                             // blocks from the same peer.
                             LogPrint(BCLog::NET, "Peer sent us compact block "
                                                  "we were already syncing!\n");
                             return true;
                         }
                     }
 
                     PartiallyDownloadedBlock &partialBlock =
                         *(*queuedBlockIt)->partialBlock;
                     ReadStatus status =
                         partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status == READ_STATUS_INVALID) {
                         // Reset in-flight state in case of whitelist
                         MarkBlockAsReceived(pindex->GetBlockHash());
                         Misbehaving(pfrom, 100, "invalid-cmpctblk");
                         LogPrintf("Peer %d sent us invalid compact block\n",
                                   pfrom->GetId());
                         return true;
                     } else if (status == READ_STATUS_FAILED) {
                         // Duplicate txindices, the block is now in-flight, so
                         // just request it.
                         std::vector<CInv> vInv(1);
                         vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                         connman->PushMessage(
                             pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                         return true;
                     }
 
                     BlockTransactionsRequest req;
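                     // Work out which transactions could not be reconstructed
                     // from our mempool so that only those are requested.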
                     for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                         if (!partialBlock.IsTxAvailable(i)) {
                             req.indices.push_back(i);
                         }
                     }
                     if (req.indices.empty()) {
                         // Dirty hack to jump to BLOCKTXN code (TODO: move
                         // message handling into their own functions)
                         BlockTransactions txn;
                         txn.blockhash = cmpctblock.header.GetHash();
                         blockTxnMsg << txn;
                         fProcessBLOCKTXN = true;
                     } else {
                         req.blockhash = pindex->GetBlockHash();
                         connman->PushMessage(
                             pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
                     }
                 } else {
                     // This block is either already in flight from a different
                     // peer, or this peer has too many blocks outstanding to
                     // download from. Optimistically try to reconstruct anyway
                     // since we might be able to without any round trips.
                     PartiallyDownloadedBlock tempBlock(config, &g_mempool);
                     ReadStatus status =
                         tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status != READ_STATUS_OK) {
                         // TODO: don't ignore failures
                         return true;
                     }
                     std::vector<CTransactionRef> dummy;
                     status = tempBlock.FillBlock(*pblock, dummy);
                     if (status == READ_STATUS_OK) {
                         fBlockReconstructed = true;
                     }
                 }
             } else {
                 if (fAlreadyInFlight) {
                     // We requested this block, but it's far into the future,
                     // so our mempool will probably be useless - request the
                     // block normally.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                     connman->PushMessage(
                         pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                     return true;
                 } else {
                     // If this was an announce-cmpctblock, we want the same
                     // treatment as a header message.
                     fRevertToHeaderProcessing = true;
                 }
             }
         } // cs_main
 
         if (fProcessBLOCKTXN) {
             return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
                                   blockTxnMsg, nTimeReceived, connman, banman,
                                   interruptMsgProc, enable_bip61);
         }
 
         if (fRevertToHeaderProcessing) {
             // Headers received from HB compact block peers are permitted to be
             // relayed before full validation (see BIP 152), so we don't want to
             // disconnect the peer if the header turns out to be for an invalid
             // block. Note that if a peer tries to build on an invalid chain,
             // that will be detected and the peer will be banned.
             return ProcessHeadersMessage(config, pfrom, connman,
                                          {cmpctblock.header},
                                          /*via_compact_block=*/true);
         }
 
         if (fBlockReconstructed) {
             // If we got here, we were able to optimistically reconstruct a
             // block that is in flight from some other peer.
             {
                 LOCK(cs_main);
                 mapBlockSource.emplace(pblock->GetHash(),
                                        std::make_pair(pfrom->GetId(), false));
             }
             bool fNewBlock = false;
             // Setting fForceProcessing to true means that we bypass some of
             // our anti-DoS protections in AcceptBlock, which filters
             // unrequested blocks that might be trying to waste our resources
             // (eg disk space). Because we only try to reconstruct blocks when
             // we're close to caught up (via the CanDirectFetch() requirement
             // above, combined with the behavior of not requesting blocks until
             // we have a chain with at least nMinimumChainWork), and we ignore
             // compact blocks with less work than our tip, it is safe to treat
             // reconstructed compact blocks as having been requested.
             ProcessNewBlock(config, pblock, /*fForceProcessing=*/true,
                             &fNewBlock);
             if (fNewBlock) {
                 pfrom->nLastBlockTime = GetTime();
             } else {
                 LOCK(cs_main);
                 mapBlockSource.erase(pblock->GetHash());
             }
 
             // hold cs_main for CBlockIndex::IsValid()
             LOCK(cs_main);
             if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
                 // Clear download state for this block, which is in process
                 // from some other peer. We do this after calling
                 // ProcessNewBlock so that a malleated cmpctblock announcement
                 // can't be used to interfere with block relay.
                 MarkBlockAsReceived(pblock->GetHash());
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::BLOCKTXN) {
         // Ignore blocktxn received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected blocktxn message received from peer %d\n",
                      pfrom->GetId());
             return true;
         }
 
         BlockTransactions resp;
         vRecv >> resp;
 
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockRead = false;
         {
             LOCK(cs_main);
 
             std::map<BlockHash,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator it = mapBlocksInFlight.find(resp.blockhash);
             if (it == mapBlocksInFlight.end() ||
                 !it->second.second->partialBlock ||
                 it->second.first != pfrom->GetId()) {
                 LogPrint(BCLog::NET,
                          "Peer %d sent us block transactions for block "
                          "we weren't expecting\n",
                          pfrom->GetId());
                 return true;
             }
 
             PartiallyDownloadedBlock &partialBlock =
                 *it->second.second->partialBlock;
             ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
             if (status == READ_STATUS_INVALID) {
                 // Reset in-flight state in case of whitelist.
                 MarkBlockAsReceived(resp.blockhash);
                 Misbehaving(pfrom, 100, "invalid-cmpctblk-txns");
                 LogPrintf("Peer %d sent us invalid compact block/non-matching "
                           "block transactions\n",
                           pfrom->GetId());
                 return true;
             } else if (status == READ_STATUS_FAILED) {
                 // Might have collided, fall back to getdata now :(
                 std::vector<CInv> invs;
                 invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
                 connman->PushMessage(pfrom,
                                      msgMaker.Make(NetMsgType::GETDATA, invs));
             } else {
                 // Block is either okay, or possibly we received
                 // READ_STATUS_CHECKBLOCK_FAILED.
                 // Note that CheckBlock can only fail for one of a few reasons:
                 // 1. bad-proof-of-work (impossible here, because we've already
                 //    accepted the header)
                 // 2. merkleroot doesn't match the transactions given (already
                 //    caught in FillBlock with READ_STATUS_FAILED, so
                 //    impossible here)
                 // 3. the block is otherwise invalid (eg invalid coinbase,
                 //    block is too big, too many legacy sigops, etc).
                 // So if CheckBlock failed, #3 is the only possibility.
                 // Under BIP 152, we don't DoS-ban unless proof of work is
                 // invalid (we don't require all the stateless checks to have
                 // been run). This is handled below, so just treat this as
                 // though the block was successfully read, and rely on the
                 // handling in ProcessNewBlock to ensure the block index is
                 // updated, reject messages go out, etc.
 
                 // After this call, the partialBlock reference above is a
                 // dangling pointer and must not be used again.
                 MarkBlockAsReceived(resp.blockhash);
                 fBlockRead = true;
                 // mapBlockSource is only used for sending reject messages and
                 // DoS scores, so the race between here and cs_main in
                 // ProcessNewBlock is fine. BIP 152 permits peers to relay
                 // compact blocks after validating the header only; we should
                 // not punish peers if the block turns out to be invalid.
                 mapBlockSource.emplace(resp.blockhash,
                                        std::make_pair(pfrom->GetId(), false));
             }
         } // Don't hold cs_main when we call into ProcessNewBlock
         if (fBlockRead) {
             bool fNewBlock = false;
             // Since we requested this block (it was in mapBlocksInFlight),
             // force it to be processed, even if it would not be a candidate for
             // new tip (missing previous block, chain not long enough, etc)
             // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
             // disk-space attacks), but this should be safe due to the
             // protections in the compact block handler -- see related comment
             // in compact block optimistic reconstruction handling.
             ProcessNewBlock(config, pblock, /*fForceProcessing=*/true,
                             &fNewBlock);
             if (fNewBlock) {
                 pfrom->nLastBlockTime = GetTime();
             } else {
                 LOCK(cs_main);
                 mapBlockSource.erase(pblock->GetHash());
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::HEADERS) {
         // Ignore headers received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected headers message received from peer %d\n",
                      pfrom->GetId());
             return true;
         }
 
         std::vector<CBlockHeader> headers;
 
         // Bypass the normal CBlock deserialization, as we don't want to risk
         // deserializing 2000 full blocks.
         unsigned int nCount = ReadCompactSize(vRecv);
         if (nCount > MAX_HEADERS_RESULTS) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "too-many-headers");
             return error("headers message size = %u", nCount);
         }
         headers.resize(nCount);
         for (unsigned int n = 0; n < nCount; n++) {
             vRecv >> headers[n];
             // Ignore tx count; assume it is 0.
             ReadCompactSize(vRecv);
         }
 
         return ProcessHeadersMessage(config, pfrom, connman, headers,
                                      /*via_compact_block=*/false);
     }
 
     if (strCommand == NetMsgType::BLOCK) {
         // Ignore block received while importing
         if (fImporting || fReindex) {
             LogPrint(BCLog::NET,
                      "Unexpected block message received from peer %d\n",
                      pfrom->GetId());
             return true;
         }
 
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         vRecv >> *pblock;
 
         LogPrint(BCLog::NET, "received block %s peer=%d\n",
                  pblock->GetHash().ToString(), pfrom->GetId());
 
         // Process all blocks from whitelisted peers, even if not requested,
         // unless we're still syncing with the network. Such an unrequested
         // block may still be processed, subject to the conditions in
         // AcceptBlock().
         bool forceProcessing = pfrom->HasPermission(PF_NOBAN) &&
                                !::ChainstateActive().IsInitialBlockDownload();
         const BlockHash hash = pblock->GetHash();
         {
             LOCK(cs_main);
             // Also always process if we requested the block explicitly, as we
             // may need it even though it is not a candidate for a new best tip.
             forceProcessing |= MarkBlockAsReceived(hash);
             // mapBlockSource is only used for sending reject messages and DoS
             // scores, so the race between here and cs_main in ProcessNewBlock
             // is fine.
             mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
         }
         bool fNewBlock = false;
         ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock);
         if (fNewBlock) {
             pfrom->nLastBlockTime = GetTime();
         } else {
             LOCK(cs_main);
             mapBlockSource.erase(hash);
         }
         return true;
     }
 
     // Ignore avalanche requests while importing
     if (strCommand == NetMsgType::AVAPOLL && !fImporting && !fReindex &&
         g_avalanche &&
         gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) {
         auto now = std::chrono::steady_clock::now();
         int64_t cooldown =
             gArgs.GetArg("-avacooldown", AVALANCHE_DEFAULT_COOLDOWN);
 
         {
             LOCK(cs_main);
             auto &node_state = State(pfrom->GetId())->m_avalanche_state;
 
             if (now <
                 node_state.last_poll + std::chrono::milliseconds(cooldown)) {
                 Misbehaving(pfrom, 20, "avapoll-cooldown");
             }
 
             node_state.last_poll = now;
         }
 
         uint64_t round;
         Unserialize(vRecv, round);
 
         unsigned int nCount = ReadCompactSize(vRecv);
         if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
             LOCK(cs_main);
             Misbehaving(pfrom, 20, "too-many-ava-poll");
             return error("poll message size = %u", nCount);
         }
 
-        std::vector<AvalancheVote> votes;
+        std::vector<avalanche::Vote> votes;
         votes.reserve(nCount);
 
         LogPrint(BCLog::NET, "received avalanche poll from peer=%d\n",
                  pfrom->GetId());
 
         {
             LOCK(cs_main);
 
             for (unsigned int n = 0; n < nCount; n++) {
                 CInv inv;
                 vRecv >> inv;
 
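                 // Vote encoding: 0 means the block is on our active chain
                 // (yes), 1 means we know it but it is not active (no), and
                 // the default -1 signals that we cannot conclude anything.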
                 uint32_t error = -1;
                 if (inv.type == MSG_BLOCK) {
                     BlockMap::iterator mi =
                         mapBlockIndex.find(BlockHash(inv.hash));
                     if (mi != mapBlockIndex.end()) {
                         error = ::ChainActive().Contains(mi->second) ? 0 : 1;
                     }
                 }
 
                 votes.emplace_back(error, inv.hash);
             }
         }
 
         // Send the query to the node.
         g_avalanche->sendResponse(
-            pfrom, AvalancheResponse(round, cooldown, std::move(votes)));
+            pfrom, avalanche::Response(round, cooldown, std::move(votes)));
         return true;
     }
 
     // Ignore avalanche requests while importing
     if (strCommand == NetMsgType::AVARESPONSE && !fImporting && !fReindex &&
         g_avalanche &&
         gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) {
         // As long as QUIC is not implemented, we need to sign response and
         // verify response's signatures in order to avoid any manipulation of
         // messages at the transport level.
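         // CHashVerifier hashes every byte it deserializes, producing the
         // digest the Schnorr signature is checked against below.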
         CHashVerifier<CDataStream> verifier(&vRecv);
-        AvalancheResponse response;
+        avalanche::Response response;
         verifier >> response;
 
         if (!g_avalanche->forNode(
                 pfrom->GetId(), [&](const avalanche::Node &n) {
                     std::array<uint8_t, 64> sig;
                     vRecv >> sig;
 
                     // Unfortunately, the verify API requires a vector.
                     std::vector<uint8_t> vchSig{sig.begin(), sig.end()};
                     return n.pubkey.VerifySchnorr(verifier.GetHash(), vchSig);
                 })) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "invalid-ava-response-signature");
             return true;
         }
 
         std::vector<avalanche::BlockUpdate> updates;
         if (!g_avalanche->registerVotes(pfrom->GetId(), response, updates)) {
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "invalid-ava-response-content");
             return true;
         }
 
         if (updates.size()) {
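             // Apply each vote outcome: park blocks the network rejected,
             // unpark accepted ones, then reconsider the best chain.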
             for (avalanche::BlockUpdate &u : updates) {
                 CBlockIndex *pindex = u.getBlockIndex();
                 switch (u.getStatus()) {
                     case avalanche::BlockUpdate::Status::Invalid:
                     case avalanche::BlockUpdate::Status::Rejected: {
                         CValidationState state;
                         ParkBlock(config, state, pindex);
                         if (!state.IsValid()) {
                             return error("Database error: %s",
                                          state.GetRejectReason());
                         }
                     } break;
                     case avalanche::BlockUpdate::Status::Accepted:
                     case avalanche::BlockUpdate::Status::Finalized: {
                         LOCK(cs_main);
                         UnparkBlock(pindex);
                     } break;
                 }
             }
 
             CValidationState state;
             if (!ActivateBestChain(config, state)) {
                 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                          FormatStateMessage(state));
             }
         }
 
         return true;
     }
 
     if (strCommand == NetMsgType::GETADDR) {
         // This asymmetric behavior for inbound and outbound connections was
         // introduced to prevent a fingerprinting attack: an attacker can send
         // specific fake addresses to users' AddrMan and later request them by
         // sending getaddr messages. Making nodes which are behind NAT and can
         // only make outgoing connections ignore the getaddr message mitigates
         // the attack.
         if (!pfrom->fInbound) {
             LogPrint(BCLog::NET,
                      "Ignoring \"getaddr\" from outbound connection. peer=%d\n",
                      pfrom->GetId());
             return true;
         }
         if (!pfrom->IsAddrRelayPeer()) {
             LogPrint(BCLog::NET,
                      "Ignoring \"getaddr\" from block-relay-only connection. "
                      "peer=%d\n",
                      pfrom->GetId());
             return true;
         }
 
         // Only send one GetAddr response per connection to reduce resource
         // waste and discourage addr stamping of INV announcements.
         if (pfrom->fSentAddr) {
             LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
                      pfrom->GetId());
             return true;
         }
         pfrom->fSentAddr = true;
 
         pfrom->vAddrToSend.clear();
         std::vector<CAddress> vAddr = connman->GetAddresses();
         FastRandomContext insecure_rand;
         for (const CAddress &addr : vAddr) {
             if (!banman->IsBanned(addr)) {
                 pfrom->PushAddress(addr, insecure_rand);
             }
         }
         return true;
     }
 
     if (strCommand == NetMsgType::MEMPOOL) {
         if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
             !pfrom->HasPermission(PF_MEMPOOL)) {
             if (!pfrom->HasPermission(PF_NOBAN)) {
                 LogPrint(BCLog::NET,
                          "mempool request with bloom filters disabled, "
                          "disconnect peer=%d\n",
                          pfrom->GetId());
                 pfrom->fDisconnect = true;
             }
             return true;
         }
 
         if (connman->OutboundTargetReached(false) &&
             !pfrom->HasPermission(PF_MEMPOOL)) {
             if (!pfrom->HasPermission(PF_NOBAN)) {
                 LogPrint(BCLog::NET,
                          "mempool request with bandwidth limit reached, "
                          "disconnect peer=%d\n",
                          pfrom->GetId());
                 pfrom->fDisconnect = true;
             }
             return true;
         }
 
         if (pfrom->m_tx_relay != nullptr) {
             LOCK(pfrom->m_tx_relay->cs_tx_inventory);
             pfrom->m_tx_relay->fSendMempool = true;
         }
         return true;
     }
 
     if (strCommand == NetMsgType::PING) {
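         // Peers at or below BIP0031_VERSION send pings without a nonce and
         // do not expect a pong reply.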
         if (pfrom->nVersion > BIP0031_VERSION) {
             uint64_t nonce = 0;
             vRecv >> nonce;
             // Echo the message back with the nonce. This allows for two useful
             // features:
             //
             // 1) A remote node can quickly check if the connection is
             // operational.
             // 2) Remote nodes can measure the latency of the network thread. If
             // this node is overloaded it won't respond to pings quickly and the
             // remote node can avoid sending us more work, like chain download
             // requests.
             //
             // The nonce stops the remote getting confused between different
             // pings: without it, if the remote node sends a ping once per
             // second and this node takes 5 seconds to respond to each, the 5th
             // ping the remote sends would appear to return very quickly.
             connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
         }
         return true;
     }
 
     if (strCommand == NetMsgType::PONG) {
         int64_t pingUsecEnd = nTimeReceived;
         uint64_t nonce = 0;
         size_t nAvail = vRecv.in_avail();
         bool bPingFinished = false;
         std::string sProblem;
 
         if (nAvail >= sizeof(nonce)) {
             vRecv >> nonce;
 
             // Only process pong message if there is an outstanding ping (old
             // ping without nonce should never pong)
             if (pfrom->nPingNonceSent != 0) {
                 if (nonce == pfrom->nPingNonceSent) {
                     // Matching pong received, this ping is no longer
                     // outstanding
                     bPingFinished = true;
                     int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
                     if (pingUsecTime > 0) {
                         // Successful ping time measurement, replace previous
                         pfrom->nPingUsecTime = pingUsecTime;
                         pfrom->nMinPingUsecTime = std::min(
                             pfrom->nMinPingUsecTime.load(), pingUsecTime);
                     } else {
                         // This should never happen
                         sProblem = "Timing mishap";
                     }
                 } else {
                     // Nonce mismatches are normal when pings are overlapping
                     sProblem = "Nonce mismatch";
                     if (nonce == 0) {
                         // This is most likely a bug in another implementation
                         // somewhere; cancel this ping
                         bPingFinished = true;
                         sProblem = "Nonce zero";
                     }
                 }
             } else {
                 sProblem = "Unsolicited pong without ping";
             }
         } else {
             // This is most likely a bug in another implementation somewhere;
             // cancel this ping
             bPingFinished = true;
             sProblem = "Short payload";
         }
 
         if (!(sProblem.empty())) {
             LogPrint(BCLog::NET,
                      "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                      pfrom->GetId(), sProblem, pfrom->nPingNonceSent, nonce,
                      nAvail);
         }
         if (bPingFinished) {
             pfrom->nPingNonceSent = 0;
         }
         return true;
     }
 
     if (strCommand == NetMsgType::FILTERLOAD) {
         CBloomFilter filter;
         vRecv >> filter;
 
         if (!filter.IsWithinSizeConstraints()) {
             // There is no excuse for sending a too-large filter
             LOCK(cs_main);
             Misbehaving(pfrom, 100, "oversized-bloom-filter");
         } else if (pfrom->m_tx_relay != nullptr) {
             LOCK(pfrom->m_tx_relay->cs_filter);
             pfrom->m_tx_relay->pfilter.reset(new CBloomFilter(filter));
             pfrom->m_tx_relay->pfilter->UpdateEmptyFull();
             pfrom->m_tx_relay->fRelayTxes = true;
         }
         return true;
     }
 
     if (strCommand == NetMsgType::FILTERADD) {
         std::vector<uint8_t> vData;
         vRecv >> vData;
 
         // Nodes must NEVER send a data item > 520 bytes (the max size for a
         // script data object, and thus, the maximum size any matched object can
         // have) in a filteradd message.
         bool bad = false;
         if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
             bad = true;
         } else if (pfrom->m_tx_relay != nullptr) {
             LOCK(pfrom->m_tx_relay->cs_filter);
             if (pfrom->m_tx_relay->pfilter) {
                 pfrom->m_tx_relay->pfilter->insert(vData);
             } else {
                 bad = true;
             }
         }
         if (bad) {
             LOCK(cs_main);
             // The structure of this code doesn't really allow for a good error
             // code. We'll go generic.
             Misbehaving(pfrom, 100, "invalid-filteradd");
         }
         return true;
     }
 
     if (strCommand == NetMsgType::FILTERCLEAR) {
         if (pfrom->m_tx_relay == nullptr) {
             return true;
         }
         LOCK(pfrom->m_tx_relay->cs_filter);
         if (pfrom->GetLocalServices() & NODE_BLOOM) {
             pfrom->m_tx_relay->pfilter.reset(new CBloomFilter());
         }
         pfrom->m_tx_relay->fRelayTxes = true;
         return true;
     }
 
     if (strCommand == NetMsgType::FEEFILTER) {
         Amount newFeeFilter = Amount::zero();
         vRecv >> newFeeFilter;
         if (MoneyRange(newFeeFilter)) {
             if (pfrom->m_tx_relay != nullptr) {
                 LOCK(pfrom->m_tx_relay->cs_feeFilter);
                 pfrom->m_tx_relay->minFeeFilter = newFeeFilter;
             }
             LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
                      CFeeRate(newFeeFilter).ToString(), pfrom->GetId());
         }
         return true;
     }
 
     if (strCommand == NetMsgType::NOTFOUND) {
         // Remove the NOTFOUND transactions from the peer
         LOCK(cs_main);
         CNodeState *state = State(pfrom->GetId());
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() <=
             MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             for (CInv &inv : vInv) {
                 if (inv.type == MSG_TX) {
                     const TxId txid(inv.hash);
                     // If we receive a NOTFOUND message for a txid we requested,
                     // erase it from our data structures for this peer.
                     auto in_flight_it =
                         state->m_tx_download.m_tx_in_flight.find(txid);
                     if (in_flight_it ==
                         state->m_tx_download.m_tx_in_flight.end()) {
                         // Skip any further work if this is a spurious NOTFOUND
                         // message.
                         continue;
                     }
                     state->m_tx_download.m_tx_in_flight.erase(in_flight_it);
                     state->m_tx_download.m_tx_announced.erase(txid);
                 }
             }
         }
         return true;
     }
 
     // Ignore unknown commands for extensibility
     LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
              SanitizeString(strCommand), pfrom->GetId());
     return true;
 }
 
 bool PeerLogicValidation::SendRejectsAndCheckIfBanned(CNode *pnode,
                                                       bool enable_bip61)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     AssertLockHeld(cs_main);
     CNodeState &state = *State(pnode->GetId());
 
     if (enable_bip61) {
         for (const CBlockReject &reject : state.rejects) {
             connman->PushMessage(
                 pnode,
                 CNetMsgMaker(INIT_PROTO_VERSION)
                     .Make(NetMsgType::REJECT, std::string(NetMsgType::BLOCK),
                           reject.chRejectCode, reject.strRejectReason,
                           reject.hashBlock));
         }
     }
     state.rejects.clear();
 
     if (state.fShouldBan) {
         state.fShouldBan = false;
         if (pnode->HasPermission(PF_NOBAN)) {
             LogPrintf("Warning: not punishing whitelisted peer %s!\n",
                       pnode->addr.ToString());
         } else if (pnode->m_manual_connection) {
             LogPrintf("Warning: not punishing manually-connected peer %s!\n",
                       pnode->addr.ToString());
         } else if (pnode->addr.IsLocal()) {
             // Disconnect but don't ban _this_ local node
             LogPrintf(
                 "Warning: disconnecting but not discouraging local peer %s!\n",
                 pnode->addr.ToString());
             pnode->fDisconnect = true;
         } else {
             // Disconnect and ban all nodes sharing the address
             LogPrintf("Disconnecting and discouraging peer %s!\n",
                       pnode->addr.ToString());
             if (m_banman) {
                 m_banman->Ban(pnode->addr, BanReasonNodeMisbehaving);
             }
             connman->DisconnectNode(pnode->addr);
         }
         return true;
     }
     return false;
 }
 
 bool PeerLogicValidation::ProcessMessages(const Config &config, CNode *pfrom,
                                           std::atomic<bool> &interruptMsgProc) {
     const CChainParams &chainparams = config.GetChainParams();
     //
     // Message format
     //  (4) message start
     //  (12) command
     //  (4) size
     //  (4) checksum
     //  (x) data
     //
     bool fMoreWork = false;
 
     if (!pfrom->vRecvGetData.empty()) {
         ProcessGetData(config, pfrom, connman, interruptMsgProc);
     }
 
     if (!pfrom->orphan_work_set.empty()) {
         LOCK2(cs_main, g_cs_orphans);
         ProcessOrphanTx(config, connman, pfrom->orphan_work_set);
     }
 
     if (pfrom->fDisconnect) {
         return false;
     }
 
     // this maintains the order of responses and prevents vRecvGetData from
     // growing unbounded
     if (!pfrom->vRecvGetData.empty()) {
         return true;
     }
     if (!pfrom->orphan_work_set.empty()) {
         return true;
     }
 
     // Don't bother if send buffer is too full to respond anyway
     if (pfrom->fPauseSend) {
         return false;
     }
 
     std::list<CNetMessage> msgs;
     {
         LOCK(pfrom->cs_vProcessMsg);
         if (pfrom->vProcessMsg.empty()) {
             return false;
         }
         // Just take one message
         msgs.splice(msgs.begin(), pfrom->vProcessMsg,
                     pfrom->vProcessMsg.begin());
         pfrom->nProcessQueueSize -=
             msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
         pfrom->fPauseRecv =
             pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
         fMoreWork = !pfrom->vProcessMsg.empty();
     }
     CNetMessage &msg(msgs.front());
 
     msg.SetVersion(pfrom->GetRecvVersion());
 
     // Scan for message start
     if (memcmp(std::begin(msg.hdr.pchMessageStart),
                std::begin(chainparams.NetMagic()),
                CMessageHeader::MESSAGE_START_SIZE) != 0) {
         LogPrint(BCLog::NET,
                  "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
                  SanitizeString(msg.hdr.GetCommand()), pfrom->GetId());
 
        // Make sure we ban the peer this message came from for some time.
         if (m_banman) {
             m_banman->Ban(pfrom->addr, BanReasonNodeMisbehaving);
         }
         connman->DisconnectNode(pfrom->addr);
 
         pfrom->fDisconnect = true;
         return false;
     }
 
     // Read header
     CMessageHeader &hdr = msg.hdr;
     if (!hdr.IsValid(config)) {
         LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
                  SanitizeString(hdr.GetCommand()), pfrom->GetId());
         return fMoreWork;
     }
     std::string strCommand = hdr.GetCommand();
 
     // Message size
     unsigned int nMessageSize = hdr.nMessageSize;
 
     // Checksum
     CDataStream &vRecv = msg.vRecv;
     const uint256 &hash = msg.GetMessageHash();
     if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) !=
         0) {
         LogPrint(
             BCLog::NET,
             "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s from "
             "peer=%d\n",
             __func__, SanitizeString(strCommand), nMessageSize,
             HexStr(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE),
             HexStr(hdr.pchChecksum,
                    hdr.pchChecksum + CMessageHeader::CHECKSUM_SIZE),
             pfrom->GetId());
         if (m_banman) {
             m_banman->Ban(pfrom->addr, BanReasonNodeMisbehaving);
         }
         connman->DisconnectNode(pfrom->addr);
         return fMoreWork;
     }
 
     // Process message
     bool fRet = false;
     try {
         fRet =
             ProcessMessage(config, pfrom, strCommand, vRecv, msg.nTime, connman,
                            m_banman, interruptMsgProc, m_enable_bip61);
         if (interruptMsgProc) {
             return false;
         }
 
         if (!pfrom->vRecvGetData.empty()) {
             fMoreWork = true;
         }
     } catch (const std::ios_base::failure &e) {
         if (m_enable_bip61) {
             connman->PushMessage(
                 pfrom,
                 CNetMsgMaker(INIT_PROTO_VERSION)
                     .Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED,
                           std::string("error parsing message")));
         }
         if (strstr(e.what(), "end of data")) {
             // Allow exceptions from under-length message on vRecv
             LogPrint(BCLog::NET,
                      "%s(%s, %u bytes): Exception '%s' caught, normally caused "
                      "by a message being shorter than its stated length\n",
                      __func__, SanitizeString(strCommand), nMessageSize,
                      e.what());
         } else if (strstr(e.what(), "size too large")) {
             // Allow exceptions from over-long size
             LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' caught\n",
                      __func__, SanitizeString(strCommand), nMessageSize,
                      e.what());
         } else if (strstr(e.what(), "non-canonical ReadCompactSize()")) {
             // Allow exceptions from non-canonical encoding
             LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' caught\n",
                      __func__, SanitizeString(strCommand), nMessageSize,
                      e.what());
         } else {
             PrintExceptionContinue(&e, "ProcessMessages()");
         }
     } catch (const std::exception &e) {
         PrintExceptionContinue(&e, "ProcessMessages()");
     } catch (...) {
         PrintExceptionContinue(nullptr, "ProcessMessages()");
     }
 
     if (!fRet) {
         LogPrint(BCLog::NET, "%s(%s, %u bytes) FAILED peer=%d\n", __func__,
                  SanitizeString(strCommand), nMessageSize, pfrom->GetId());
     }
 
     LOCK(cs_main);
     SendRejectsAndCheckIfBanned(pfrom, m_enable_bip61);
 
     return fMoreWork;
 }
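
// Illustrative sketch (not called anywhere): the checksum verified above is,
// by P2P convention, the first CHECKSUM_SIZE bytes of the double-SHA256 of
// the payload. Assuming the CHash256 helper from hash.h, the recomputation
// would look roughly like:
//
//     uint256 hash;
//     CHash256()
//         .Write((const uint8_t *)vRecv.data(), vRecv.size())
//         .Finalize(hash.begin());
//     // valid iff memcmp(hash.begin(), hdr.pchChecksum,
//     //                  CMessageHeader::CHECKSUM_SIZE) == 0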
 
 void PeerLogicValidation::ConsiderEviction(CNode *pto,
                                            int64_t time_in_seconds) {
     AssertLockHeld(cs_main);
 
     CNodeState &state = *State(pto->GetId());
     const CNetMsgMaker msgMaker(pto->GetSendVersion());
 
     if (!state.m_chain_sync.m_protect &&
         IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
         // This is an outbound peer subject to disconnection if they don't
         // announce a block with as much work as the current tip within
         // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
         // chain has more work than ours, we should sync to it, unless it's
         // invalid, in which case we should find that out and disconnect from
         // them elsewhere).
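        // The m_chain_sync logic below makes one of three transitions:
        //  1) the peer's best known block has at least as much work as our
        //     tip -> clear any armed timeout;
        //  2) the peer is behind and either no timeout is armed or the peer
        //     reached the previous benchmark header -> arm a new timeout
        //     against the current tip;
        //  3) an armed timeout expired -> send one getheaders, and disconnect
        //     if the peer still hasn't caught up by the bumped deadline.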
         if (state.pindexBestKnownBlock != nullptr &&
             state.pindexBestKnownBlock->nChainWork >=
                 ::ChainActive().Tip()->nChainWork) {
             if (state.m_chain_sync.m_timeout != 0) {
                 state.m_chain_sync.m_timeout = 0;
                 state.m_chain_sync.m_work_header = nullptr;
                 state.m_chain_sync.m_sent_getheaders = false;
             }
         } else if (state.m_chain_sync.m_timeout == 0 ||
                    (state.m_chain_sync.m_work_header != nullptr &&
                     state.pindexBestKnownBlock != nullptr &&
                     state.pindexBestKnownBlock->nChainWork >=
                         state.m_chain_sync.m_work_header->nChainWork)) {
            // The best block we know this peer has is behind our tip, and
            // we're either noticing that for the first time, OR this peer was
            // able to catch up to some earlier point where we checked against
            // our tip. Either way, set a new timeout based on the current tip.
             state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
             state.m_chain_sync.m_work_header = ::ChainActive().Tip();
             state.m_chain_sync.m_sent_getheaders = false;
         } else if (state.m_chain_sync.m_timeout > 0 &&
                    time_in_seconds > state.m_chain_sync.m_timeout) {
            // Still no evidence that our peer has synced to a chain with as
            // much work as our tip had when we first detected it was behind.
            // Send a single getheaders message to give the peer a chance to
            // update us.
             if (state.m_chain_sync.m_sent_getheaders) {
                 // They've run out of time to catch up!
                 LogPrintf(
                     "Disconnecting outbound peer %d for old chain, best known "
                     "block = %s\n",
                     pto->GetId(),
                     state.pindexBestKnownBlock != nullptr
                         ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                         : "<none>");
                 pto->fDisconnect = true;
             } else {
                 assert(state.m_chain_sync.m_work_header);
                 LogPrint(
                     BCLog::NET,
                     "sending getheaders to outbound peer=%d to verify chain "
                     "work (current best known block:%s, benchmark blockhash: "
                     "%s)\n",
                     pto->GetId(),
                     state.pindexBestKnownBlock != nullptr
                         ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                         : "<none>",
                     state.m_chain_sync.m_work_header->GetBlockHash()
                         .ToString());
                 connman->PushMessage(
                     pto,
                     msgMaker.Make(NetMsgType::GETHEADERS,
                                   ::ChainActive().GetLocator(
                                       state.m_chain_sync.m_work_header->pprev),
                                   uint256()));
                 state.m_chain_sync.m_sent_getheaders = true;
                 // 2 minutes
                 constexpr int64_t HEADERS_RESPONSE_TIME = 120;
                 // Bump the timeout to allow a response, which could clear the
                 // timeout (if the response shows the peer has synced), reset
                 // the timeout (if the peer syncs to the required work but not
                 // to our tip), or result in disconnect (if we advance to the
                 // timeout and pindexBestKnownBlock has not sufficiently
                 // progressed)
                 state.m_chain_sync.m_timeout =
                     time_in_seconds + HEADERS_RESPONSE_TIME;
             }
         }
     }
 }
 
 void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds) {
     // Check whether we have too many outbound peers
     int extra_peers = connman->GetExtraOutboundCount();
     if (extra_peers <= 0) {
         return;
     }
 
    // If we have more outbound peers than we target, disconnect one.
    // Pick the outbound peer that least recently announced a new block to us,
    // breaking ties by choosing the more recent connection (higher node id).
     NodeId worst_peer = -1;
     int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
 
     connman->ForEachNode([&](CNode *pnode) {
         AssertLockHeld(cs_main);
 
         // Ignore non-outbound peers, or nodes marked for disconnect already
         if (!IsOutboundDisconnectionCandidate(pnode) || pnode->fDisconnect) {
             return;
         }
         CNodeState *state = State(pnode->GetId());
         if (state == nullptr) {
             // shouldn't be possible, but just in case
             return;
         }
         // Don't evict our protected peers
         if (state->m_chain_sync.m_protect) {
             return;
         }
         // Don't evict our block-relay-only peers.
         if (pnode->m_tx_relay == nullptr) {
             return;
         }
 
         if (state->m_last_block_announcement < oldest_block_announcement ||
             (state->m_last_block_announcement == oldest_block_announcement &&
              pnode->GetId() > worst_peer)) {
             worst_peer = pnode->GetId();
             oldest_block_announcement = state->m_last_block_announcement;
         }
     });
 
     if (worst_peer == -1) {
         return;
     }
 
     bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
         AssertLockHeld(cs_main);
 
         // Only disconnect a peer that has been connected to us for some
         // reasonable fraction of our check-frequency, to give it time for new
         // information to have arrived.
         // Also don't disconnect any peer we're trying to download a block from.
         CNodeState &state = *State(pnode->GetId());
         if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME &&
             state.nBlocksInFlight == 0) {
             LogPrint(BCLog::NET,
                      "disconnecting extra outbound peer=%d (last block "
                      "announcement received at time %d)\n",
                      pnode->GetId(), oldest_block_announcement);
             pnode->fDisconnect = true;
             return true;
         } else {
             LogPrint(BCLog::NET,
                      "keeping outbound peer=%d chosen for eviction "
                      "(connect time: %d, blocks_in_flight: %d)\n",
                      pnode->GetId(), pnode->nTimeConnected,
                      state.nBlocksInFlight);
             return false;
         }
     });
 
     if (disconnected) {
         // If we disconnected an extra peer, that means we successfully
         // connected to at least one peer after the last time we detected a
         // stale tip. Don't try any more extra peers until we next detect a
         // stale tip, to limit the load we put on the network from these extra
         // connections.
         connman->SetTryNewOutboundPeer(false);
     }
 }
 
 void PeerLogicValidation::CheckForStaleTipAndEvictPeers(
     const Consensus::Params &consensusParams) {
     LOCK(cs_main);
 
     if (connman == nullptr) {
         return;
     }
 
     int64_t time_in_seconds = GetTime();
 
     EvictExtraOutboundPeers(time_in_seconds);
 
     if (time_in_seconds <= m_stale_tip_check_time) {
         return;
     }
 
     // Check whether our tip is stale, and if so, allow using an extra outbound
     // peer.
     if (!fImporting && !fReindex && connman->GetNetworkActive() &&
         connman->GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) {
         LogPrintf("Potential stale tip detected, will try using extra outbound "
                   "peer (last tip update: %d seconds ago)\n",
                   time_in_seconds - g_last_tip_update);
         connman->SetTryNewOutboundPeer(true);
     } else if (connman->GetTryNewOutboundPeer()) {
         connman->SetTryNewOutboundPeer(false);
     }
     m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
 }
 
 namespace {
 class CompareInvMempoolOrder {
     CTxMemPool *mp;
 
 public:
     explicit CompareInvMempoolOrder(CTxMemPool *_mempool) { mp = _mempool; }
 
     bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
         /**
          * As std::make_heap produces a max-heap, we want the entries with the
          * fewest ancestors/highest fee to sort later.
          */
         return mp->CompareDepthAndScore(*b, *a);
     }
 };
 } // namespace
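
// A minimal illustration (hypothetical values, not part of the build) of the
// inversion above: std::make_heap builds a max-heap, so the comparator must
// rank the entry we want to pop *first* as the "largest" element.
//
//     std::vector<int> v{3, 1, 4, 1, 5};
//     auto cmp = [](int a, int b) { return a > b; }; // inverted "less than"
//     std::make_heap(v.begin(), v.end(), cmp);
//     std::pop_heap(v.begin(), v.end(), cmp);
//     // v.back() == 1: the numerically smallest entry pops first, just as
//     // CompareDepthAndScore(*b, *a) makes the best announcement candidates
//     // pop first in SendMessages below.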
 
 bool PeerLogicValidation::SendMessages(const Config &config, CNode *pto,
                                        std::atomic<bool> &interruptMsgProc) {
     const Consensus::Params &consensusParams =
         config.GetChainParams().GetConsensus();
 
     // Don't send anything until the version handshake is complete
     if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
         return true;
     }
 
     // If we get here, the outgoing message serialization version is set and
     // can't change.
     const CNetMsgMaker msgMaker(pto->GetSendVersion());
 
     //
     // Message: ping
     //
     bool pingSend = false;
     if (pto->fPingQueued) {
         // RPC ping request by user
         pingSend = true;
     }
     if (pto->nPingNonceSent == 0 &&
         pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
         // Ping automatically sent as a latency probe & keepalive.
         pingSend = true;
     }
     if (pingSend) {
         uint64_t nonce = 0;
         while (nonce == 0) {
             GetRandBytes((uint8_t *)&nonce, sizeof(nonce));
         }
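        // A nonce of zero is reserved to mean "no ping in flight" (the pong
        // handler only matches when nPingNonceSent != 0), which is why the
        // loop above redraws until the nonce is nonzero.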
         pto->fPingQueued = false;
         pto->nPingUsecStart = GetTimeMicros();
         if (pto->nVersion > BIP0031_VERSION) {
             pto->nPingNonceSent = nonce;
             connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
         } else {
             // Peer is too old to support ping command with nonce, pong will
             // never arrive.
             pto->nPingNonceSent = 0;
             connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
         }
     }
 
     // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
     TRY_LOCK(cs_main, lockMain);
     if (!lockMain) {
         return true;
     }
 
     if (SendRejectsAndCheckIfBanned(pto, m_enable_bip61)) {
         return true;
     }
     CNodeState &state = *State(pto->GetId());
 
     // Address refresh broadcast
     int64_t nNow = GetTimeMicros();
     if (pto->IsAddrRelayPeer() &&
         !::ChainstateActive().IsInitialBlockDownload() &&
         pto->nNextLocalAddrSend < nNow) {
         AdvertiseLocal(pto);
         pto->nNextLocalAddrSend =
             PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
     }
 
     //
     // Message: addr
     //
     if (pto->IsAddrRelayPeer() && pto->nNextAddrSend < nNow) {
         pto->nNextAddrSend =
             PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
         std::vector<CAddress> vAddr;
         vAddr.reserve(pto->vAddrToSend.size());
         for (const CAddress &addr : pto->vAddrToSend) {
             if (!pto->addrKnown.contains(addr.GetKey())) {
                 pto->addrKnown.insert(addr.GetKey());
                 vAddr.push_back(addr);
                // receiver rejects addr messages larger than 1000 entries
                 if (vAddr.size() >= 1000) {
                     connman->PushMessage(
                         pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
                     vAddr.clear();
                 }
             }
         }
         pto->vAddrToSend.clear();
         if (!vAddr.empty()) {
             connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
         }
 
         // we only send the big addr message once
         if (pto->vAddrToSend.capacity() > 40) {
             pto->vAddrToSend.shrink_to_fit();
         }
     }
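    // Both send times above are scheduled via PoissonNextSend, which draws
    // exponentially distributed delays around the given mean interval so the
    // broadcast timing is hard to fingerprint. A rough sketch of the sampling
    // (assumed shape; see the PoissonNextSend implementation for the real
    // arithmetic):
    //
    //     next_send_usec = now_usec +
    //         (int64_t)(-std::log(uniform(0.0, 1.0)) * avg_interval_usec);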
 
     // Start block sync
     if (pindexBestHeader == nullptr) {
         pindexBestHeader = ::ChainActive().Tip();
     }
 
     // Download if this is a nice peer, or we have no nice peers and this one
     // might do.
     bool fFetch = state.fPreferredDownload ||
                   (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot);
 
     if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
         // Only actively request headers from a single peer, unless we're close
         // to today.
         if ((nSyncStarted == 0 && fFetch) ||
             pindexBestHeader->GetBlockTime() >
                 GetAdjustedTime() - 24 * 60 * 60) {
             state.fSyncStarted = true;
             state.nHeadersSyncTimeout =
                 GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE +
                 HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER *
                     (GetAdjustedTime() - pindexBestHeader->GetBlockTime()) /
                     (consensusParams.nPowTargetSpacing);
             nSyncStarted++;
             const CBlockIndex *pindexStart = pindexBestHeader;
             /**
              * If possible, start at the block preceding the currently best
              * known header. This ensures that we always get a non-empty list of
              * headers back as long as the peer is up-to-date. With a non-empty
              * response, we can initialise the peer's known best block. This
              * wouldn't be possible if we requested starting at pindexBestHeader
              * and got back an empty response.
              */
             if (pindexStart->pprev) {
                 pindexStart = pindexStart->pprev;
             }
 
             LogPrint(BCLog::NET,
                      "initial getheaders (%d) to peer=%d (startheight:%d)\n",
                      pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
             connman->PushMessage(
                 pto, msgMaker.Make(NetMsgType::GETHEADERS,
                                    ::ChainActive().GetLocator(pindexStart),
                                    uint256()));
         }
     }
 
     //
     // Try sending block announcements via headers
     //
     {
        // If we have fewer than MAX_BLOCKS_TO_ANNOUNCE in our list of block
        // hashes we're relaying, and our peer wants headers announcements,
        // then find the first header not yet known to our peer but that would
        // connect, and send it. If no header would connect, or if we have too
        // many blocks, or if the peer doesn't want headers, just add all of
        // them to the inv queue.
         LOCK(pto->cs_inventory);
         std::vector<CBlock> vHeaders;
         bool fRevertToInv =
             ((!state.fPreferHeaders &&
               (!state.fPreferHeaderAndIDs ||
                pto->vBlockHashesToAnnounce.size() > 1)) ||
              pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
         // last header queued for delivery
         const CBlockIndex *pBestIndex = nullptr;
         // ensure pindexBestKnownBlock is up-to-date
         ProcessBlockAvailability(pto->GetId());
 
         if (!fRevertToInv) {
             bool fFoundStartingHeader = false;
            // Try to find the first header that our peer doesn't have, and
            // then send all headers past that one. If we come across any
            // headers that aren't on ::ChainActive(), give up.
             for (const BlockHash &hash : pto->vBlockHashesToAnnounce) {
                 const CBlockIndex *pindex = LookupBlockIndex(hash);
                 assert(pindex);
                 if (::ChainActive()[pindex->nHeight] != pindex) {
                     // Bail out if we reorged away from this block
                     fRevertToInv = true;
                     break;
                 }
                 if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
                    // This means that the list of blocks to announce doesn't
                    // connect to each other. This shouldn't really be possible
                     // to hit during regular operation (because reorgs should
                     // take us to a chain that has some block not on the prior
                     // chain, which should be caught by the prior check), but
                     // one way this could happen is by using invalidateblock /
                     // reconsiderblock repeatedly on the tip, causing it to be
                     // added multiple times to vBlockHashesToAnnounce. Robustly
                     // deal with this rare situation by reverting to an inv.
                     fRevertToInv = true;
                     break;
                 }
                 pBestIndex = pindex;
                 if (fFoundStartingHeader) {
                     // add this to the headers message
                     vHeaders.push_back(pindex->GetBlockHeader());
                 } else if (PeerHasHeader(&state, pindex)) {
                     // Keep looking for the first new block.
                     continue;
                 } else if (pindex->pprev == nullptr ||
                            PeerHasHeader(&state, pindex->pprev)) {
                     // Peer doesn't have this header but they do have the prior
                     // one.
                     // Start sending headers.
                     fFoundStartingHeader = true;
                     vHeaders.push_back(pindex->GetBlockHeader());
                 } else {
                     // Peer doesn't have this header or the prior one --
                     // nothing will connect, so bail out.
                     fRevertToInv = true;
                     break;
                 }
             }
         }
         if (!fRevertToInv && !vHeaders.empty()) {
             if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
                // We only send up to 1 block as header-and-ids, as sending
                // more would probably mean we're doing an initial-ish sync or
                // they're slow.
                 LogPrint(BCLog::NET,
                          "%s sending header-and-ids %s to peer=%d\n", __func__,
                          vHeaders.front().GetHash().ToString(), pto->GetId());
 
                 int nSendFlags = 0;
 
                 bool fGotBlockFromCache = false;
                 {
                     LOCK(cs_most_recent_block);
                     if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
                         CBlockHeaderAndShortTxIDs cmpctblock(
                             *most_recent_block);
                         connman->PushMessage(
                             pto,
                             msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                           cmpctblock));
                         fGotBlockFromCache = true;
                     }
                 }
                 if (!fGotBlockFromCache) {
                     CBlock block;
                     bool ret =
                         ReadBlockFromDisk(block, pBestIndex, consensusParams);
                     assert(ret);
                     CBlockHeaderAndShortTxIDs cmpctblock(block);
                     connman->PushMessage(
                         pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                            cmpctblock));
                 }
                 state.pindexBestHeaderSent = pBestIndex;
             } else if (state.fPreferHeaders) {
                 if (vHeaders.size() > 1) {
                     LogPrint(BCLog::NET,
                              "%s: %u headers, range (%s, %s), to peer=%d\n",
                              __func__, vHeaders.size(),
                              vHeaders.front().GetHash().ToString(),
                              vHeaders.back().GetHash().ToString(),
                              pto->GetId());
                 } else {
                     LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n",
                              __func__, vHeaders.front().GetHash().ToString(),
                              pto->GetId());
                 }
                 connman->PushMessage(
                     pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
                 state.pindexBestHeaderSent = pBestIndex;
             } else {
                 fRevertToInv = true;
             }
         }
         if (fRevertToInv) {
             // If falling back to using an inv, just try to inv the tip. The
             // last entry in vBlockHashesToAnnounce was our tip at some point in
             // the past.
             if (!pto->vBlockHashesToAnnounce.empty()) {
                 const BlockHash &hashToAnnounce =
                     pto->vBlockHashesToAnnounce.back();
                 const CBlockIndex *pindex = LookupBlockIndex(hashToAnnounce);
                 assert(pindex);
 
                 // Warn if we're announcing a block that is not on the main
                 // chain. This should be very rare and could be optimized out.
                 // Just log for now.
                 if (::ChainActive()[pindex->nHeight] != pindex) {
                     LogPrint(BCLog::NET,
                              "Announcing block %s not on main chain (tip=%s)\n",
                              hashToAnnounce.ToString(),
                              ::ChainActive().Tip()->GetBlockHash().ToString());
                 }
 
                 // If the peer's chain has this block, don't inv it back.
                 if (!PeerHasHeader(&state, pindex)) {
                     pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
                     LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n",
                              __func__, pto->GetId(), hashToAnnounce.ToString());
                 }
             }
         }
         pto->vBlockHashesToAnnounce.clear();
     }
 
     //
     // Message: inventory
     //
     std::vector<CInv> vInv;
     {
         LOCK(pto->cs_inventory);
         vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(),
                                       INVENTORY_BROADCAST_MAX_PER_MB *
                                           config.GetMaxBlockSize() / 1000000));
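        // The reservation above scales the per-MB inventory budget by the max
        // block size in MB: e.g. a 32,000,000-byte max block size (an
        // illustrative figure) allows 32 * INVENTORY_BROADCAST_MAX_PER_MB
        // entries.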
 
         // Add blocks
         for (const BlockHash &hash : pto->vInventoryBlockToSend) {
             vInv.push_back(CInv(MSG_BLOCK, hash));
             if (vInv.size() == MAX_INV_SZ) {
                 connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
                 vInv.clear();
             }
         }
         pto->vInventoryBlockToSend.clear();
 
         if (pto->m_tx_relay != nullptr) {
             LOCK(pto->m_tx_relay->cs_tx_inventory);
             // Check whether periodic sends should happen
             bool fSendTrickle = pto->HasPermission(PF_NOBAN);
             if (pto->m_tx_relay->nNextInvSend < nNow) {
                 fSendTrickle = true;
                 if (pto->fInbound) {
                     pto->m_tx_relay->nNextInvSend =
                         connman->PoissonNextSendInbound(
                             nNow, INVENTORY_BROADCAST_INTERVAL);
                 } else {
                     // Use half the delay for outbound peers, as there is less
                     // privacy concern for them.
                     pto->m_tx_relay->nNextInvSend = PoissonNextSend(
                         nNow, INVENTORY_BROADCAST_INTERVAL >> 1);
                 }
             }
 
            // If it's time to send, but the peer has asked us not to relay
            // transactions, drop everything we would have announced.
             if (fSendTrickle) {
                 LOCK(pto->m_tx_relay->cs_filter);
                 if (!pto->m_tx_relay->fRelayTxes) {
                     pto->m_tx_relay->setInventoryTxToSend.clear();
                 }
             }
 
             // Respond to BIP35 mempool requests
             if (fSendTrickle && pto->m_tx_relay->fSendMempool) {
                 auto vtxinfo = g_mempool.infoAll();
                 pto->m_tx_relay->fSendMempool = false;
                 Amount filterrate = Amount::zero();
                 {
                     LOCK(pto->m_tx_relay->cs_feeFilter);
                     filterrate = pto->m_tx_relay->minFeeFilter;
                 }
 
                 LOCK(pto->m_tx_relay->cs_filter);
 
                 for (const auto &txinfo : vtxinfo) {
                     const TxId &txid = txinfo.tx->GetId();
                     CInv inv(MSG_TX, txid);
                     pto->m_tx_relay->setInventoryTxToSend.erase(txid);
                     if (filterrate != Amount::zero() &&
                         txinfo.feeRate.GetFeePerK() < filterrate) {
                         continue;
                     }
                     if (pto->m_tx_relay->pfilter &&
                         !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(
                             *txinfo.tx)) {
                         continue;
                     }
                     pto->m_tx_relay->filterInventoryKnown.insert(txid);
                     vInv.push_back(inv);
                     if (vInv.size() == MAX_INV_SZ) {
                         connman->PushMessage(
                             pto, msgMaker.Make(NetMsgType::INV, vInv));
                         vInv.clear();
                     }
                 }
                 pto->m_tx_relay->m_last_mempool_req =
                     GetTime<std::chrono::seconds>();
             }
 
             // Determine transactions to relay
             if (fSendTrickle) {
                 // Produce a vector with all candidates for sending
                 std::vector<std::set<TxId>::iterator> vInvTx;
                 vInvTx.reserve(pto->m_tx_relay->setInventoryTxToSend.size());
                 for (std::set<TxId>::iterator it =
                          pto->m_tx_relay->setInventoryTxToSend.begin();
                      it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) {
                     vInvTx.push_back(it);
                 }
                 Amount filterrate = Amount::zero();
                 {
                     LOCK(pto->m_tx_relay->cs_feeFilter);
                     filterrate = pto->m_tx_relay->minFeeFilter;
                 }
                 // Topologically and fee-rate sort the inventory we send for
                 // privacy and priority reasons. A heap is used so that not all
                 // items need sorting if only a few are being sent.
                 CompareInvMempoolOrder compareInvMempoolOrder(&g_mempool);
                 std::make_heap(vInvTx.begin(), vInvTx.end(),
                                compareInvMempoolOrder);
                 // No reason to drain out at many times the network's capacity,
                 // especially since we have many peers and some will draw much
                 // shorter delays.
                 unsigned int nRelayedTransactions = 0;
                 LOCK(pto->m_tx_relay->cs_filter);
                 while (!vInvTx.empty() &&
                        nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB *
                                                   config.GetMaxBlockSize() /
                                                   1000000) {
                     // Fetch the top element from the heap
                     std::pop_heap(vInvTx.begin(), vInvTx.end(),
                                   compareInvMempoolOrder);
                     std::set<TxId>::iterator it = vInvTx.back();
                     vInvTx.pop_back();
                     const TxId txid = *it;
                     // Remove it from the to-be-sent set
                     pto->m_tx_relay->setInventoryTxToSend.erase(it);
                     // Check if not in the filter already
                     if (pto->m_tx_relay->filterInventoryKnown.contains(txid)) {
                         continue;
                     }
                     // Not in the mempool anymore? don't bother sending it.
                     auto txinfo = g_mempool.info(txid);
                     if (!txinfo.tx) {
                         continue;
                     }
                     if (filterrate != Amount::zero() &&
                         txinfo.feeRate.GetFeePerK() < filterrate) {
                         continue;
                     }
                     if (pto->m_tx_relay->pfilter &&
                         !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(
                             *txinfo.tx)) {
                         continue;
                     }
                     // Send
                     vInv.push_back(CInv(MSG_TX, txid));
                     nRelayedTransactions++;
                     {
                         // Expire old relay messages
                         while (!vRelayExpiration.empty() &&
                                vRelayExpiration.front().first < nNow) {
                             mapRelay.erase(vRelayExpiration.front().second);
                             vRelayExpiration.pop_front();
                         }
 
                         auto ret = mapRelay.insert(
                             std::make_pair(txid, std::move(txinfo.tx)));
                         if (ret.second) {
                             vRelayExpiration.push_back(
                                 std::make_pair(nNow +
                                                    std::chrono::microseconds{
                                                        RELAY_TX_CACHE_TIME}
                                                        .count(),
                                                ret.first));
                         }
                     }
                     if (vInv.size() == MAX_INV_SZ) {
                         connman->PushMessage(
                             pto, msgMaker.Make(NetMsgType::INV, vInv));
                         vInv.clear();
                     }
                     pto->m_tx_relay->filterInventoryKnown.insert(txid);
                 }
             }
         }
     }
     if (!vInv.empty()) {
         connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
     }
 
     // Detect whether we're stalling
     const auto current_time = GetTime<std::chrono::microseconds>();
     // nNow is the current system time (GetTimeMicros is not mockable) and
     // should be replaced by the mockable current_time eventually
     nNow = GetTimeMicros();
     if (state.nStallingSince &&
         state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
         // Stalling only triggers when the block download window cannot move.
         // During normal steady state, the download window should be much larger
         // than the to-be-downloaded set of blocks, so disconnection should only
         // happen during initial block download.
         LogPrintf("Peer=%d is stalling block download, disconnecting\n",
                   pto->GetId());
         pto->fDisconnect = true;
         return true;
     }
     // In case there is a block that has been in flight from this peer for 2 +
     // 0.5 * N times the block interval (with N the number of peers from which
     // we're downloading validated blocks), disconnect due to timeout. We
     // compensate for other peers to prevent killing off peers due to our own
     // downstream link being saturated. We only count validated in-flight blocks
     // so peers can't advertise non-existing block hashes to unreasonably
     // increase our timeout.
     if (state.vBlocksInFlight.size() > 0) {
         QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
         int nOtherPeersWithValidatedDownloads =
             nPeersWithValidatedDownloads -
             (state.nBlocksInFlightValidHeaders > 0);
         if (nNow > state.nDownloadingSince +
                        consensusParams.nPowTargetSpacing *
                            (BLOCK_DOWNLOAD_TIMEOUT_BASE +
                             BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
                                 nOtherPeersWithValidatedDownloads)) {
             LogPrintf("Timeout downloading block %s from peer=%d, "
                       "disconnecting\n",
                       queuedBlock.hash.ToString(), pto->GetId());
             pto->fDisconnect = true;
             return true;
         }
     }
 
     // Check for headers sync timeouts
     if (state.fSyncStarted &&
         state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
         // Detect whether this is a stalling initial-headers-sync peer
         if (pindexBestHeader->GetBlockTime() <=
             GetAdjustedTime() - 24 * 60 * 60) {
             if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 &&
                 (nPreferredDownload - state.fPreferredDownload >= 1)) {
                 // Disconnect a (non-whitelisted) peer if it is our only sync
                 // peer, and we have others we could be using instead.
                 // Note: If all our peers are inbound, then we won't disconnect
                 // our sync peer for stalling; we have bigger problems if we
                 // can't get any outbound peers.
                 if (!pto->HasPermission(PF_NOBAN)) {
                     LogPrintf("Timeout downloading headers from peer=%d, "
                               "disconnecting\n",
                               pto->GetId());
                     pto->fDisconnect = true;
                     return true;
                 } else {
                     LogPrintf("Timeout downloading headers from whitelisted "
                               "peer=%d, not disconnecting\n",
                               pto->GetId());
                     // Reset the headers sync state so that we have a chance to
                     // try downloading from a different peer.
                     // Note: this will also result in at least one more
                     // getheaders message to be sent to this peer (eventually).
                     state.fSyncStarted = false;
                     nSyncStarted--;
                     state.nHeadersSyncTimeout = 0;
                 }
             }
         } else {
             // After we've caught up once, reset the timeout so we can't trigger
             // disconnect later.
             state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
         }
     }
 
    // Check that outbound peers have reasonable chains. GetTime() is used by
    // this anti-DoS logic so that we can test it using mocktime.
     ConsiderEviction(pto, GetTime());
 
     //
     // Message: getdata (blocks)
     //
     std::vector<CInv> vGetData;
     if (!pto->fClient &&
         ((fFetch && !pto->m_limited_node) ||
          !::ChainstateActive().IsInitialBlockDownload()) &&
         state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
         std::vector<const CBlockIndex *> vToDownload;
         NodeId staller = -1;
         FindNextBlocksToDownload(pto->GetId(),
                                  MAX_BLOCKS_IN_TRANSIT_PER_PEER -
                                      state.nBlocksInFlight,
                                  vToDownload, staller, consensusParams);
         for (const CBlockIndex *pindex : vToDownload) {
             vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
             MarkBlockAsInFlight(config, pto->GetId(), pindex->GetBlockHash(),
                                 consensusParams, pindex);
             LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
                      pindex->GetBlockHash().ToString(), pindex->nHeight,
                      pto->GetId());
         }
         if (state.nBlocksInFlight == 0 && staller != -1) {
             if (State(staller)->nStallingSince == 0) {
                 State(staller)->nStallingSince = nNow;
                 LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
             }
         }
     }
 
     //
     // Message: getdata (transactions)
     //
 
     // For robustness, expire old requests after a long timeout, so that we can
     // resume downloading transactions from a peer even if they were
     // unresponsive in the past. Eventually we should consider disconnecting
     // peers, but this is conservative.
     if (state.m_tx_download.m_check_expiry_timer <= current_time) {
         for (auto it = state.m_tx_download.m_tx_in_flight.begin();
              it != state.m_tx_download.m_tx_in_flight.end();) {
             if (it->second <= current_time - TX_EXPIRY_INTERVAL) {
                 LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n",
                          it->first.ToString(), pto->GetId());
                 state.m_tx_download.m_tx_announced.erase(it->first);
                 state.m_tx_download.m_tx_in_flight.erase(it++);
             } else {
                 ++it;
             }
         }
         // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize
         // so that we're not doing this for all peers at the same time.
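        // (Half the interval is fixed and GetRandMicros draws uniformly from
        // [0, TX_EXPIRY_INTERVAL), whose mean is TX_EXPIRY_INTERVAL / 2, so
        // the expected period works out to exactly TX_EXPIRY_INTERVAL.)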
         state.m_tx_download.m_check_expiry_timer =
             current_time + TX_EXPIRY_INTERVAL / 2 +
             GetRandMicros(TX_EXPIRY_INTERVAL);
     }
 
     auto &tx_process_time = state.m_tx_download.m_tx_process_time;
     while (!tx_process_time.empty() &&
            tx_process_time.begin()->first <= current_time &&
            state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) {
         const TxId txid = tx_process_time.begin()->second;
         // Erase this entry from tx_process_time (it may be added back for
         // processing at a later time, see below)
         tx_process_time.erase(tx_process_time.begin());
         CInv inv(MSG_TX, txid);
         if (!AlreadyHave(inv)) {
             // If this transaction was last requested more than 1 minute ago,
             // then request.
             const auto last_request_time = GetTxRequestTime(txid);
             if (last_request_time <= current_time - GETDATA_TX_INTERVAL) {
                 LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(),
                          pto->GetId());
                 vGetData.push_back(inv);
                 if (vGetData.size() >= MAX_GETDATA_SZ) {
                     connman->PushMessage(
                         pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
                     vGetData.clear();
                 }
                 UpdateTxRequestTime(txid, current_time);
                 state.m_tx_download.m_tx_in_flight.emplace(txid, current_time);
             } else {
                 // This transaction is in flight from someone else; queue
                 // up processing to happen after the download times out
                 // (with a slight delay for inbound peers, to prefer
                 // requests to outbound peers).
                 const auto next_process_time = CalculateTxGetDataTime(
                     txid, current_time, !state.fPreferredDownload);
                 tx_process_time.emplace(next_process_time, txid);
             }
         } else {
             // We have already seen this transaction, no need to download.
             state.m_tx_download.m_tx_announced.erase(txid);
             state.m_tx_download.m_tx_in_flight.erase(txid);
         }
     }
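    // To summarize the flow above: each announced txid moves between
    // m_tx_announced (everything the peer offered us), m_tx_process_time
    // (when we may next consider requesting it), and m_tx_in_flight (requests
    // awaiting a tx or notfound reply). A txid already in flight from another
    // peer is re-queued here with a delay rather than requested twice.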
 
     if (!vGetData.empty()) {
         connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
     }
 
     //
     // Message: feefilter
     //
    // We don't want whitelisted peers to filter txs to us if we have
    // -whitelistforcerelay.
     if (pto->m_tx_relay != nullptr && pto->nVersion >= FEEFILTER_VERSION &&
         gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
         !pto->HasPermission(PF_FORCERELAY)) {
         Amount currentFilter =
             g_mempool
                 .GetMinFee(
                     gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) *
                     1000000)
                 .GetFeePerK();
         int64_t timeNow = GetTimeMicros();
         if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) {
             static CFeeRate default_feerate =
                 CFeeRate(DEFAULT_MIN_RELAY_TX_FEE_PER_KB);
             static FeeFilterRounder filterRounder(default_feerate);
             Amount filterToSend = filterRounder.round(currentFilter);
             filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
 
             if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) {
                 connman->PushMessage(
                     pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
                 pto->m_tx_relay->lastSentFeeFilter = filterToSend;
             }
             pto->m_tx_relay->nextSendTimeFeeFilter =
                 PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
         }
         // If the fee filter has changed substantially and it's still more than
         // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
         // broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
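        // Example with illustrative numbers: if lastSentFeeFilter is
        // 1000 sat/kB, any current filter below 750 or above ~1333 sat/kB
        // counts as a substantial change and pulls the broadcast forward.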
         else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 <
                      pto->m_tx_relay->nextSendTimeFeeFilter &&
                  (currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 ||
                   currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) {
             pto->m_tx_relay->nextSendTimeFeeFilter =
                 timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
         }
     }
     return true;
 }
 
 class CNetProcessingCleanup {
 public:
     CNetProcessingCleanup() {}
     ~CNetProcessingCleanup() {
         // orphan transactions
         mapOrphanTransactions.clear();
         mapOrphanTransactionsByPrev.clear();
     }
 };
 static CNetProcessingCleanup instance_of_cnetprocessingcleanup;
diff --git a/src/protocol.h b/src/protocol.h
index 1c6729707..e3d0505c6 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -1,479 +1,479 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef __cplusplus
 #error This header can only be compiled as C++.
 #endif
 
 #ifndef BITCOIN_PROTOCOL_H
 #define BITCOIN_PROTOCOL_H
 
 #include <netaddress.h>
 #include <serialize.h>
 #include <uint256.h>
 #include <version.h>
 
 #include <array>
 #include <cstdint>
 #include <string>
 
 class Config;
 
 /**
 * Maximum length of incoming protocol messages (currently 2 MB).
  * NB: Messages propagating block content are not subject to this limit.
  */
 static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024;
 
 /**
  * Message header.
  * (4) message start.
  * (12) command.
  * (4) size.
  * (4) checksum.
  */
 class CMessageHeader {
 public:
     static constexpr size_t MESSAGE_START_SIZE = 4;
     static constexpr size_t COMMAND_SIZE = 12;
     static constexpr size_t MESSAGE_SIZE_SIZE = 4;
     static constexpr size_t CHECKSUM_SIZE = 4;
     static constexpr size_t MESSAGE_SIZE_OFFSET =
         MESSAGE_START_SIZE + COMMAND_SIZE;
     static constexpr size_t CHECKSUM_OFFSET =
         MESSAGE_SIZE_OFFSET + MESSAGE_SIZE_SIZE;
     static constexpr size_t HEADER_SIZE =
         MESSAGE_START_SIZE + COMMAND_SIZE + MESSAGE_SIZE_SIZE + CHECKSUM_SIZE;
     typedef std::array<uint8_t, MESSAGE_START_SIZE> MessageMagic;
 
     explicit CMessageHeader(const MessageMagic &pchMessageStartIn);
 
     /**
      * Construct a P2P message header from message-start characters, a command
      * and the size of the message.
      * @note Passing in a `pszCommand` longer than COMMAND_SIZE will result in a
      * run-time assertion error.
      */
     CMessageHeader(const MessageMagic &pchMessageStartIn,
                    const char *pszCommand, unsigned int nMessageSizeIn);
 
     std::string GetCommand() const;
     bool IsValid(const Config &config) const;
     bool IsValidWithoutConfig(const MessageMagic &magic) const;
     bool IsOversized(const Config &config) const;
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         READWRITE(pchMessageStart);
         READWRITE(pchCommand);
         READWRITE(nMessageSize);
         READWRITE(pchChecksum);
     }
 
     MessageMagic pchMessageStart;
     std::array<char, COMMAND_SIZE> pchCommand;
     uint32_t nMessageSize;
     uint8_t pchChecksum[CHECKSUM_SIZE];
 };
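
/**
 * Illustration only, not part of this change: the offset constants above pin
 * the classic 24-byte wire header. Compile-time sanity checks, assuming no
 * other fields contribute to the layout:
 */
static_assert(CMessageHeader::MESSAGE_SIZE_OFFSET == 16,
              "size field must follow 4 magic and 12 command bytes");
static_assert(CMessageHeader::CHECKSUM_OFFSET == 20,
              "checksum must follow the 4-byte size field");
static_assert(CMessageHeader::HEADER_SIZE == 24,
              "header must be 24 bytes on the wire");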
 
 /**
  * Bitcoin protocol message types. When adding new message types, don't forget
  * to update allNetMessageTypes in protocol.cpp.
  */
 namespace NetMsgType {
 
 /**
  * The version message provides information about the transmitting node to the
  * receiving node at the beginning of a connection.
  * @see https://bitcoin.org/en/developer-reference#version
  */
 extern const char *VERSION;
 /**
  * The verack message acknowledges a previously-received version message,
  * informing the connecting node that it can begin to send other messages.
  * @see https://bitcoin.org/en/developer-reference#verack
  */
 extern const char *VERACK;
 /**
  * The addr (IP address) message relays connection information for peers on the
  * network.
  * @see https://bitcoin.org/en/developer-reference#addr
  */
 extern const char *ADDR;
 /**
  * The inv message (inventory message) transmits one or more inventories of
  * objects known to the transmitting peer.
  * @see https://bitcoin.org/en/developer-reference#inv
  */
 extern const char *INV;
 /**
  * The getdata message requests one or more data objects from another node.
  * @see https://bitcoin.org/en/developer-reference#getdata
  */
 extern const char *GETDATA;
 /**
  * The merkleblock message is a reply to a getdata message which requested a
  * block using the inventory type MSG_MERKLEBLOCK.
  * @since protocol version 70001 as described by BIP37.
  * @see https://bitcoin.org/en/developer-reference#merkleblock
  */
 extern const char *MERKLEBLOCK;
 /**
  * The getblocks message requests an inv message that provides block header
  * hashes starting from a particular point in the block chain.
  * @see https://bitcoin.org/en/developer-reference#getblocks
  */
 extern const char *GETBLOCKS;
 /**
  * The getheaders message requests a headers message that provides block
  * headers starting from a particular point in the block chain.
  * @since protocol version 31800.
  * @see https://bitcoin.org/en/developer-reference#getheaders
  */
 extern const char *GETHEADERS;
 /**
  * The tx message transmits a single transaction.
  * @see https://bitcoin.org/en/developer-reference#tx
  */
 extern const char *TX;
 /**
  * The headers message sends one or more block headers to a node which
  * previously requested certain headers with a getheaders message.
  * @since protocol version 31800.
  * @see https://bitcoin.org/en/developer-reference#headers
  */
 extern const char *HEADERS;
 /**
  * The block message transmits a single serialized block.
  * @see https://bitcoin.org/en/developer-reference#block
  */
 extern const char *BLOCK;
 /**
  * The getaddr message requests an addr message from the receiving node,
  * preferably one with lots of IP addresses of other receiving nodes.
  * @see https://bitcoin.org/en/developer-reference#getaddr
  */
 extern const char *GETADDR;
 /**
  * The mempool message requests the TXIDs of transactions that the receiving
  * node has verified as valid but which have not yet appeared in a block.
  * @since protocol version 60002.
  * @see https://bitcoin.org/en/developer-reference#mempool
  */
 extern const char *MEMPOOL;
 /**
  * The ping message is sent periodically to help confirm that the receiving
  * peer is still connected.
  * @see https://bitcoin.org/en/developer-reference#ping
  */
 extern const char *PING;
 /**
  * The pong message replies to a ping message, proving to the pinging node that
  * the ponging node is still alive.
  * @since protocol version 60001 as described by BIP31.
  * @see https://bitcoin.org/en/developer-reference#pong
  */
 extern const char *PONG;
 /**
  * The notfound message is a reply to a getdata message which requested an
  * object the receiving node does not have available for relay.
 * @since protocol version 70001.
  * @see https://bitcoin.org/en/developer-reference#notfound
  */
 extern const char *NOTFOUND;
 /**
  * The filterload message tells the receiving peer to filter all relayed
  * transactions and requested merkle blocks through the provided filter.
  * @since protocol version 70001 as described by BIP37.
  *   Only available with service bit NODE_BLOOM since protocol version
  *   70011 as described by BIP111.
  * @see https://bitcoin.org/en/developer-reference#filterload
  */
 extern const char *FILTERLOAD;
 /**
  * The filteradd message tells the receiving peer to add a single element to a
  * previously-set bloom filter, such as a new public key.
  * @since protocol version 70001 as described by BIP37.
  *   Only available with service bit NODE_BLOOM since protocol version
  *   70011 as described by BIP111.
  * @see https://bitcoin.org/en/developer-reference#filteradd
  */
 extern const char *FILTERADD;
 /**
  * The filterclear message tells the receiving peer to remove a previously-set
  * bloom filter.
  * @since protocol version 70001 as described by BIP37.
  *   Only available with service bit NODE_BLOOM since protocol version
  *   70011 as described by BIP111.
  * @see https://bitcoin.org/en/developer-reference#filterclear
  */
 extern const char *FILTERCLEAR;
 /**
  * The reject message informs the receiving node that one of its previous
  * messages has been rejected.
  * @since protocol version 70002 as described by BIP61.
  * @see https://bitcoin.org/en/developer-reference#reject
  */
 extern const char *REJECT;
 /**
  * Indicates that a node prefers to receive new block announcements via a
  * "headers" message rather than an "inv".
  * @since protocol version 70012 as described by BIP130.
  * @see https://bitcoin.org/en/developer-reference#sendheaders
  */
 extern const char *SENDHEADERS;
 /**
 * The feefilter message tells the receiving peer not to announce (inv) any
 * transactions to us which do not meet the specified minimum fee rate.
  * @since protocol version 70013 as described by BIP133
  */
 extern const char *FEEFILTER;
 /**
  * Contains a 1-byte bool and 8-byte LE version number.
  * Indicates that a node is willing to provide blocks via "cmpctblock" messages.
  * May indicate that a node prefers to receive new block announcements via a
  * "cmpctblock" message rather than an "inv", depending on message contents.
  * @since protocol version 70014 as described by BIP 152
  */
 extern const char *SENDCMPCT;
 /**
  * Contains a CBlockHeaderAndShortTxIDs object - providing a header and
  * list of "short txids".
  * @since protocol version 70014 as described by BIP 152
  */
 extern const char *CMPCTBLOCK;
 /**
 * Contains a BlockTransactionsRequest.
  * Peer should respond with "blocktxn" message.
  * @since protocol version 70014 as described by BIP 152
  */
 extern const char *GETBLOCKTXN;
 /**
  * Contains a BlockTransactions.
  * Sent in response to a "getblocktxn" message.
  * @since protocol version 70014 as described by BIP 152
  */
 extern const char *BLOCKTXN;
 /**
- * Contains an AvalanchePoll.
+ * Contains an avalanche::Poll.
  * Peer should respond with "avaresponse" message.
  */
 extern const char *AVAPOLL;
 /**
- * Contains an AvalancheResponse.
+ * Contains an avalanche::Response.
 * Sent in response to an "avapoll" message.
  */
 extern const char *AVARESPONSE;
 
 /**
 * Indicates whether the message is used to transmit the content of a block.
  * These messages can be significantly larger than usual messages and therefore
  * may need to be processed differently.
  */
 bool IsBlockLike(const std::string &strCommand);
 }; // namespace NetMsgType
 
 /* Get a vector of all valid message types (see above) */
 const std::vector<std::string> &getAllNetMessageTypes();
 
 /**
  * nServices flags.
  */
 enum ServiceFlags : uint64_t {
     // Nothing
     NODE_NONE = 0,
     // NODE_NETWORK means that the node is capable of serving the complete block
    // chain. It is currently set by all Bitcoin ABC non-pruned nodes, and is
     // unset by SPV clients or other light clients.
     NODE_NETWORK = (1 << 0),
     // NODE_GETUTXO means the node is capable of responding to the getutxo
     // protocol request. Bitcoin ABC does not support this but a patch set
     // called Bitcoin XT does. See BIP 64 for details on how this is
     // implemented.
     NODE_GETUTXO = (1 << 1),
     // NODE_BLOOM means the node is capable and willing to handle bloom-filtered
     // connections. Bitcoin ABC nodes used to support this by default, without
     // advertising this bit, but no longer do as of protocol version 70011 (=
     // NO_BLOOM_VERSION)
     NODE_BLOOM = (1 << 2),
     // NODE_XTHIN means the node supports Xtreme Thinblocks. If this is turned
     // off then the node will not service nor make xthin requests.
     NODE_XTHIN = (1 << 4),
     // NODE_BITCOIN_CASH means the node supports Bitcoin Cash and the
     // associated consensus rule changes.
    // This service bit is intended to be used only until some time after the
    // UAHF activation, when the Bitcoin Cash network has adequately separated.
     // TODO: remove (free up) the NODE_BITCOIN_CASH service bit once no longer
     // needed.
     NODE_BITCOIN_CASH = (1 << 5),
    // NODE_NETWORK_LIMITED means the same as NODE_NETWORK with the limitation
    // of only serving the last 288 blocks (about two days' worth).
     // See BIP159 for details on how this is implemented.
     NODE_NETWORK_LIMITED = (1 << 10),
 
    // The last non-experimental service bit; a helper for looping over the
    // flags.
     NODE_LAST_NON_EXPERIMENTAL_SERVICE_BIT = (1 << 23),
 
     // Bits 24-31 are reserved for temporary experiments. Just pick a bit that
     // isn't getting used, or one not being used much, and notify the
     // bitcoin-development mailing list. Remember that service bits are just
     // unauthenticated advertisements, so your code must be robust against
     // collisions and other cases where nodes may be advertising a service they
     // do not actually support. Other service bits should be allocated via the
     // BIP process.
 
     // NODE_AVALANCHE means the node supports Bitcoin Cash's avalanche
     // preconsensus mechanism.
     NODE_AVALANCHE = (1 << 24),
 };
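
/**
 * Illustration only, not part of this change: service flags combine bitwise.
 * A hypothetical full node that also serves bloom filters would advertise:
 *
 *     ServiceFlags nLocalServices =
 *         ServiceFlags(NODE_NETWORK | NODE_BLOOM);
 *
 * The cast is needed because operator| on the enumerators yields the
 * underlying uint64_t rather than a ServiceFlags.
 */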
 
 /**
  * Gets the set of service flags which are "desirable" for a given peer.
  *
 * These are the flags a peer must support for it to be "interesting" to us,
 * i.e. for us to be willing to spend one of our few outbound connection
 * slots on it, or to prioritize keeping its connection around.
  *
  * Relevant service flags may be peer- and state-specific in that the
  * version of the peer may determine which flags are required (eg in the
  * case of NODE_NETWORK_LIMITED where we seek out NODE_NETWORK peers
  * unless they set NODE_NETWORK_LIMITED and we are out of IBD, in which
  * case NODE_NETWORK_LIMITED suffices).
  *
  * Thus, generally, avoid calling with peerServices == NODE_NONE, unless
  * state-specific flags must absolutely be avoided. When called with
  * peerServices == NODE_NONE, the returned desirable service flags are
  * guaranteed to not change dependent on state - ie they are suitable for
  * use when describing peers which we know to be desirable, but for which
  * we do not have a confirmed set of service flags.
  *
  * If the NODE_NONE return value is changed, contrib/seeds/makeseeds.py
  * should be updated appropriately to filter for the same nodes.
  */
 ServiceFlags GetDesirableServiceFlags(ServiceFlags services);
 
 /**
  * Set the current IBD status in order to figure out the desirable service
  * flags
  */
 void SetServiceFlagsIBDCache(bool status);
 
 /**
  * A shortcut for (services & GetDesirableServiceFlags(services))
 * == GetDesirableServiceFlags(services), i.e. it determines whether the
 * given set of service flags is sufficient for a peer to be "relevant".
  */
 static inline bool HasAllDesirableServiceFlags(ServiceFlags services) {
     return !(GetDesirableServiceFlags(services) & (~services));
 }
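
/**
 * Illustration only, not part of this change: the negated form above is
 * equivalent to the masked comparison from the comment. Writing
 * desirable = GetDesirableServiceFlags(services), a desirable bit missing
 * from services is exactly a bit that survives (desirable & ~services), so
 * the expression is zero precisely when
 * (services & desirable) == desirable. For example, with
 * desirable = NODE_NETWORK and services = NODE_BLOOM,
 * desirable & ~services == NODE_NETWORK != 0, so the peer is not relevant.
 */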
 
 /**
  * Checks if a peer with the given service flags may be capable of having a
  * robust address-storage DB.
  */
 static inline bool MayHaveUsefulAddressDB(ServiceFlags services) {
     return (services & NODE_NETWORK) || (services & NODE_NETWORK_LIMITED);
 }
 
 /**
  * A CService with information about it as peer.
  */
 class CAddress : public CService {
 public:
     CAddress();
     explicit CAddress(CService ipIn, ServiceFlags nServicesIn);
 
     void Init();
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
        if (ser_action.ForRead()) {
            Init();
        }
        int nVersion = s.GetVersion();
        if (s.GetType() & SER_DISK) {
            READWRITE(nVersion);
        }
        if ((s.GetType() & SER_DISK) ||
            (nVersion >= CADDR_TIME_VERSION && !(s.GetType() & SER_GETHASH))) {
            READWRITE(nTime);
        }
         uint64_t nServicesInt = nServices;
         READWRITE(nServicesInt);
         nServices = static_cast<ServiceFlags>(nServicesInt);
         READWRITEAS(CService, *this);
     }
 
     // TODO: make private (improves encapsulation)
 public:
     ServiceFlags nServices;
 
     // disk and network only
     unsigned int nTime;
 };
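
/**
 * Illustration only, not part of this change: per SerializationOp above, a
 * network round trip writes nTime (for peers at CADDR_TIME_VERSION or
 * later), then the service bits, then the underlying CService; disk
 * serialization additionally stores the version. A hypothetical sketch,
 * assuming <streams.h> for CDataStream:
 *
 *     CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
 *     ss << addr;  // addr is some CAddress
 *     CAddress roundTripped;
 *     ss >> roundTripped;  // Init() resets the fields before reading
 */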
 
/** getdata message type mask */
 const uint32_t MSG_TYPE_MASK = 0xffffffff >> 3;
 
 /** getdata / inv message types.
  * These numbers are defined by the protocol. When adding a new value, be sure
  * to mention it in the respective BIP.
  */
 enum GetDataMsg {
     UNDEFINED = 0,
     MSG_TX = 1,
     MSG_BLOCK = 2,
     // The following can only occur in getdata. Invs always use TX or BLOCK.
     //! Defined in BIP37
     MSG_FILTERED_BLOCK = 3,
     //! Defined in BIP152
     MSG_CMPCT_BLOCK = 4,
 };
 
 /**
 * Inv(entory) message data.
  * Intended as non-ambiguous identifier of objects (eg. transactions, blocks)
  * held by peers.
  */
 class CInv {
 public:
     // TODO: make private (improves encapsulation)
     uint32_t type;
     uint256 hash;
 
 public:
     CInv() : type(0), hash() {}
     CInv(uint32_t typeIn, const uint256 &hashIn) : type(typeIn), hash(hashIn) {}
 
     ADD_SERIALIZE_METHODS;
 
     template <typename Stream, typename Operation>
     inline void SerializationOp(Stream &s, Operation ser_action) {
         READWRITE(type);
         READWRITE(hash);
     }
 
     friend bool operator<(const CInv &a, const CInv &b) {
         return a.type < b.type || (a.type == b.type && a.hash < b.hash);
     }
 
     std::string GetCommand() const;
     std::string ToString() const;
 
     uint32_t GetKind() const { return type & MSG_TYPE_MASK; }
 
     bool IsTx() const {
         auto k = GetKind();
         return k == MSG_TX;
     }
 
     bool IsSomeBlock() const {
         auto k = GetKind();
         return k == MSG_BLOCK || k == MSG_FILTERED_BLOCK ||
                k == MSG_CMPCT_BLOCK;
     }
 };
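
/**
 * Illustration only, not part of this change: MSG_TYPE_MASK is 0x1fffffff,
 * so GetKind() keeps the low 29 bits of `type` before classification. A
 * hypothetical usage sketch, where blockId stands in for any uint256:
 *
 *     CInv inv(MSG_CMPCT_BLOCK, blockId);
 *     assert(inv.GetKind() == MSG_CMPCT_BLOCK);
 *     assert(!inv.IsTx() && inv.IsSomeBlock());
 */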
 
 #endif // BITCOIN_PROTOCOL_H