diff --git a/src/avalanche/test/processor_tests.cpp b/src/avalanche/test/processor_tests.cpp
index 87b980aac2..0f8058f191 100644
--- a/src/avalanche/test/processor_tests.cpp
+++ b/src/avalanche/test/processor_tests.cpp
@@ -1,2540 +1,2540 @@
 // Copyright (c) 2018-2020 The Bitcoin developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <avalanche/processor.h>
 
 #include <arith_uint256.h>
 #include <avalanche/avalanche.h>
 #include <avalanche/delegationbuilder.h>
 #include <avalanche/peermanager.h>
 #include <avalanche/proofbuilder.h>
 #include <avalanche/voterecord.h>
 #include <chain.h>
 #include <config.h>
 #include <core_io.h>
 #include <key_io.h>
 #include <net_processing.h> // For ::PeerManager
 #include <reverse_iterator.h>
 #include <scheduler.h>
 #include <util/time.h>
 #include <util/translation.h> // For bilingual_str
 // D6970 moved LookupBlockIndex from chain.h to validation.h. TODO: remove
 // this when LookupBlockIndex is refactored out of validation.
 #include <validation.h>
 
 #include <avalanche/test/util.h>
 #include <test/util/setup_common.h>
 
 #include <boost/mpl/list.hpp>
 #include <boost/test/unit_test.hpp>
 
 #include <functional>
 #include <limits>
 #include <type_traits>
 #include <vector>
 
 using namespace avalanche;
 
 namespace avalanche {
 namespace {
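     // Test-only accessor into Processor internals (round counter, vote
     // records, quorum parameters, ...), presumably via a friend declaration
     // in Processor, so the tests below can drive and inspect state that the
     // public interface does not expose.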
     struct AvalancheTest {
         static void runEventLoop(avalanche::Processor &p) { p.runEventLoop(); }
 
         static std::vector<CInv> getInvsForNextPoll(Processor &p) {
             return p.getInvsForNextPoll(false);
         }
 
         static NodeId getSuitableNodeToQuery(Processor &p) {
             return WITH_LOCK(p.cs_peerManager,
                              return p.peerManager->selectNode());
         }
 
         static uint64_t getRound(const Processor &p) { return p.round; }
 
         static uint32_t getMinQuorumScore(const Processor &p) {
             return p.minQuorumScore;
         }
 
         static double getMinQuorumConnectedScoreRatio(const Processor &p) {
             return p.minQuorumConnectedScoreRatio;
         }
 
         static void clearavaproofsNodeCounter(Processor &p) {
             p.avaproofsNodeCounter = 0;
         }
 
         static void addVoteRecord(Processor &p, AnyVoteItem &item,
                                   VoteRecord &voteRecord) {
             p.voteRecords.getWriteView()->insert(
                 std::make_pair(item, voteRecord));
         }
 
         static void setFinalizationTip(Processor &p,
                                        const CBlockIndex *pindex) {
             LOCK(p.cs_finalizationTip);
             p.finalizationTip = pindex;
         }
 
         static void setLocalProofShareable(Processor &p, bool shareable) {
             p.m_canShareLocalProof = shareable;
         }
 
         static void updatedBlockTip(Processor &p) { p.updatedBlockTip(); }
 
         static void addProofToRecentfinalized(Processor &p,
                                               const ProofId &proofid) {
             WITH_LOCK(p.cs_finalizedItems,
                       return p.finalizedItems.insert(proofid));
         }
     };
 } // namespace
 
 struct TestVoteRecord : public VoteRecord {
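     // VoteRecord appears to pack the "accepted" flag into the lowest bit of
     // the confidence field (set via VoteRecord(true) here), so the desired
     // confidence count is shifted left by one before being merged in.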
     explicit TestVoteRecord(uint16_t conf) : VoteRecord(true) {
         confidence |= conf << 1;
     }
 };
 } // namespace avalanche
 
 namespace {
 struct CConnmanTest : public CConnman {
     using CConnman::CConnman;
     void AddNode(CNode &node) {
         LOCK(m_nodes_mutex);
         m_nodes.push_back(&node);
     }
     void ClearNodes() {
         LOCK(m_nodes_mutex);
         for (CNode *node : m_nodes) {
             delete node;
         }
         m_nodes.clear();
     }
 };
 
 CService ip(uint32_t i) {
     struct in_addr s;
     s.s_addr = i;
     return CService(CNetAddr(s), Params().GetDefaultPort());
 }
 
 struct AvalancheTestingSetup : public TestChain100Setup {
     const ::Config &config;
     CConnmanTest *m_connman;
 
     std::unique_ptr<Processor> m_processor;
 
     // The master private key we delegate to.
     CKey masterpriv;
 
     std::unordered_set<std::string> m_overridden_args;
 
     AvalancheTestingSetup()
         : TestChain100Setup(), config(GetConfig()),
           masterpriv(CKey::MakeCompressedKey()) {
         // Deterministic randomness for tests.
         auto connman = std::make_unique<CConnmanTest>(config, 0x1337, 0x1337,
                                                       *m_node.addrman);
         m_connman = connman.get();
         m_node.connman = std::move(connman);
 
         // Get the processor ready.
         setArg("-avaminquorumstake", "0");
         setArg("-avaminquorumconnectedstakeratio", "0");
         setArg("-avaminavaproofsnodecount", "0");
         setArg("-avaproofstakeutxoconfirmations", "1");
         bilingual_str error;
         m_processor = Processor::MakeProcessor(
             *m_node.args, *m_node.chain, m_node.connman.get(),
             *Assert(m_node.chainman), m_node.mempool.get(), *m_node.scheduler,
             error);
         BOOST_CHECK(m_processor);
 
         m_node.peerman = ::PeerManager::make(
             *m_connman, *m_node.addrman, m_node.banman.get(), *m_node.chainman,
-            *m_node.mempool, false);
+            *m_node.mempool, m_processor.get(), false);
         m_node.chain = interfaces::MakeChain(m_node, config.GetChainParams());
     }
 
     ~AvalancheTestingSetup() {
         m_connman->ClearNodes();
         SyncWithValidationInterfaceQueue();
 
         ArgsManager &argsman = *Assert(m_node.args);
         for (const std::string &key : m_overridden_args) {
             argsman.ClearForcedArg(key);
         }
         m_overridden_args.clear();
     }
 
     CNode *ConnectNode(ServiceFlags nServices) {
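         // Create an outbound peer advertising the given services and
         // register it with both the net-processing PeerManager and the test
         // connection manager.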
         static NodeId id = 0;
 
         CAddress addr(ip(GetRand<uint32_t>()), NODE_NONE);
         auto node =
             new CNode(id++, INVALID_SOCKET, addr,
                       /* nKeyedNetGroupIn */ 0,
                       /* nLocalHostNonceIn */ 0,
                       /* nLocalExtraEntropyIn */ 0, CAddress(),
                       /* pszDest */ "", ConnectionType::OUTBOUND_FULL_RELAY,
                       /* inbound_onion */ false);
         node->SetCommonVersion(PROTOCOL_VERSION);
         node->m_has_all_wanted_services =
             HasAllDesirableServiceFlags(nServices);
         m_node.peerman->InitializeNode(config, *node, NODE_NETWORK);
         node->nVersion = 1;
         node->fSuccessfullyConnected = true;
 
         m_connman->AddNode(*node);
         return node;
     }
 
     ProofRef GetProof(CScript payoutScript = UNSPENDABLE_ECREG_PAYOUT_SCRIPT) {
         const CKey key = CKey::MakeCompressedKey();
         const COutPoint outpoint{TxId(GetRandHash()), 0};
         CScript script = GetScriptForDestination(PKHash(key.GetPubKey()));
         const Amount amount = PROOF_DUST_THRESHOLD;
         const uint32_t height = 100;
 
         LOCK(cs_main);
         CCoinsViewCache &coins =
             Assert(m_node.chainman)->ActiveChainstate().CoinsTip();
         coins.AddCoin(outpoint, Coin(CTxOut(amount, script), height, false),
                       false);
 
         ProofBuilder pb(0, 0, masterpriv, payoutScript);
         BOOST_CHECK(pb.addUTXO(outpoint, amount, height, false, key));
         return pb.build();
     }
 
     bool addNode(NodeId nodeid, const ProofId &proofid) {
         return m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
             return pm.addNode(nodeid, proofid);
         });
     }
 
     bool addNode(NodeId nodeid) {
         auto proof = GetProof();
         return m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
             return pm.registerProof(proof) &&
                    pm.addNode(nodeid, proof->getId());
         });
     }
 
     std::array<CNode *, 8> ConnectNodes() {
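         // Connect eight avalanche peers, all attached to the same proof.
         // Eight responding nodes are what these tests rely on to get
         // conclusive votes.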
         auto proof = GetProof();
         BOOST_CHECK(
             m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
                 return pm.registerProof(proof);
             }));
         const ProofId &proofid = proof->getId();
 
         std::array<CNode *, 8> nodes;
         for (CNode *&n : nodes) {
             n = ConnectNode(NODE_AVALANCHE);
             BOOST_CHECK(addNode(n->GetId(), proofid));
         }
 
         return nodes;
     }
 
     void runEventLoop() { AvalancheTest::runEventLoop(*m_processor); }
 
     NodeId getSuitableNodeToQuery() {
         return AvalancheTest::getSuitableNodeToQuery(*m_processor);
     }
 
     std::vector<CInv> getInvsForNextPoll() {
         return AvalancheTest::getInvsForNextPoll(*m_processor);
     }
 
     uint64_t getRound() const { return AvalancheTest::getRound(*m_processor); }
 
     bool registerVotes(NodeId nodeid, const avalanche::Response &response,
                        std::vector<avalanche::VoteItemUpdate> &updates,
                        std::string &error) {
         int banscore;
         return m_processor->registerVotes(nodeid, response, updates, banscore,
                                           error);
     }
 
     bool registerVotes(NodeId nodeid, const avalanche::Response &response,
                        std::vector<avalanche::VoteItemUpdate> &updates) {
         int banscore;
         std::string error;
         return m_processor->registerVotes(nodeid, response, updates, banscore,
                                           error);
     }
 
     void setArg(std::string key, std::string value) {
         ArgsManager &argsman = *Assert(m_node.args);
         argsman.ForceSetArg(key, std::move(value));
         m_overridden_args.emplace(std::move(key));
     }
 
     bool addToReconcile(const AnyVoteItem &item) {
         return m_processor->addToReconcile(item);
     }
 };
 
 struct BlockProvider {
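     // Each provider adapts one votable item type (block, proof, mempool
     // transaction) to a common interface: build an item, derive its id,
     // build properly ordered votes, and invalidate the item. The templated
     // test cases below are instantiated once per provider.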
     AvalancheTestingSetup *fixture;
     uint32_t invType;
 
     BlockProvider(AvalancheTestingSetup *_fixture)
         : fixture(_fixture), invType(MSG_BLOCK) {}
 
     CBlockIndex *buildVoteItem() const {
         CBlock block = fixture->CreateAndProcessBlock({}, CScript());
         const BlockHash blockHash = block.GetHash();
 
         LOCK(cs_main);
         return Assert(fixture->m_node.chainman)
             ->m_blockman.LookupBlockIndex(blockHash);
     }
 
     uint256 getVoteItemId(const CBlockIndex *pindex) const {
         return pindex->GetBlockHash();
     }
 
     std::vector<Vote> buildVotesForItems(uint32_t error,
                                          std::vector<CBlockIndex *> &&items) {
         size_t numItems = items.size();
 
         std::vector<Vote> votes;
         votes.reserve(numItems);
 
         // Votes are sorted by most work first
         std::sort(items.begin(), items.end(), CBlockIndexWorkComparator());
         for (auto &item : reverse_iterate(items)) {
             votes.emplace_back(error, item->GetBlockHash());
         }
 
         return votes;
     }
 
     void invalidateItem(CBlockIndex *pindex) {
         LOCK(::cs_main);
         pindex->nStatus = pindex->nStatus.withFailed();
     }
 
     const CBlockIndex *fromAnyVoteItem(const AnyVoteItem &item) {
         return std::get<const CBlockIndex *>(item);
     }
 };
 
 struct ProofProvider {
     AvalancheTestingSetup *fixture;
     uint32_t invType;
 
     ProofProvider(AvalancheTestingSetup *_fixture)
         : fixture(_fixture), invType(MSG_AVA_PROOF) {}
 
     ProofRef buildVoteItem() const {
         const ProofRef proof = fixture->GetProof();
         fixture->m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
             BOOST_CHECK(pm.registerProof(proof));
         });
         return proof;
     }
 
     uint256 getVoteItemId(const ProofRef &proof) const {
         return proof->getId();
     }
 
     std::vector<Vote> buildVotesForItems(uint32_t error,
                                          std::vector<ProofRef> &&items) {
         size_t numItems = items.size();
 
         std::vector<Vote> votes;
         votes.reserve(numItems);
 
         // Votes are sorted by highest score first
         std::sort(items.begin(), items.end(), ProofComparatorByScore());
         for (auto &item : items) {
             votes.emplace_back(error, item->getId());
         }
 
         return votes;
     }
 
     void invalidateItem(const ProofRef &proof) {
         fixture->m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
             pm.rejectProof(proof->getId(),
                            avalanche::PeerManager::RejectionMode::INVALIDATE);
         });
     }
 
     const ProofRef fromAnyVoteItem(const AnyVoteItem &item) {
         return std::get<const ProofRef>(item);
     }
 };
 
 struct TxProvider {
     AvalancheTestingSetup *fixture;
 
     std::vector<avalanche::VoteItemUpdate> updates;
     uint32_t invType;
 
     TxProvider(AvalancheTestingSetup *_fixture)
         : fixture(_fixture), invType(MSG_TX) {}
 
     CTransactionRef buildVoteItem() const {
         auto rng = FastRandomContext();
         CMutableTransaction mtx;
         mtx.nVersion = 2;
         mtx.vin.emplace_back(COutPoint{TxId(rng.rand256()), 0});
         mtx.vout.emplace_back(10 * COIN, CScript() << OP_TRUE);
 
         CTransactionRef tx = MakeTransactionRef(std::move(mtx));
 
         TestMemPoolEntryHelper mempoolEntryHelper;
         auto entry = mempoolEntryHelper.Fee(int64_t(rng.randrange(10)) * COIN)
                          .FromTx(tx);
 
         CTxMemPool *mempool = Assert(fixture->m_node.mempool.get());
         {
             LOCK2(cs_main, mempool->cs);
             mempool->addUnchecked(entry);
             BOOST_CHECK(mempool->exists(tx->GetId()));
         }
 
         return tx;
     }
 
     uint256 getVoteItemId(const CTransactionRef &tx) const {
         return tx->GetId();
     }
 
     std::vector<Vote> buildVotesForItems(uint32_t error,
                                          std::vector<CTransactionRef> &&items) {
         size_t numItems = items.size();
 
         std::vector<Vote> votes;
         votes.reserve(numItems);
 
         CTxMemPool *mempool = Assert(fixture->m_node.mempool.get());
 
         {
             LOCK(mempool->cs);
 
             // Transactions are sorted by modified fee rate as long as they
             // are in the mempool. Let's keep it simple here and assume that
             // is the case.
             std::sort(items.begin(), items.end(),
                       [mempool](const CTransactionRef &lhs,
                                 const CTransactionRef &rhs)
                           EXCLUSIVE_LOCKS_REQUIRED(mempool->cs) {
                               auto lhsIter = mempool->GetIter(lhs->GetId());
                               auto rhsIter = mempool->GetIter(rhs->GetId());
                               BOOST_CHECK(lhsIter);
                               BOOST_CHECK(rhsIter);
 
                               return CompareTxMemPoolEntryByModifiedFeeRate{}(
                                   **lhsIter, **rhsIter);
                           });
         }
 
         for (auto &item : items) {
             votes.emplace_back(error, item->GetId());
         }
 
         return votes;
     }
 
     void invalidateItem(const CTransactionRef &tx) {
         BOOST_CHECK(tx != nullptr);
         CTxMemPool *mempool = Assert(fixture->m_node.mempool.get());
 
         LOCK(mempool->cs);
         mempool->removeRecursive(*tx, MemPoolRemovalReason::CONFLICT);
         BOOST_CHECK(!mempool->exists(tx->GetId()));
     }
 
     const CTransactionRef fromAnyVoteItem(const AnyVoteItem &item) {
         return std::get<const CTransactionRef>(item);
     }
 };
 
 } // namespace
 
 BOOST_FIXTURE_TEST_SUITE(processor_tests, AvalancheTestingSetup)
 
 // FIXME A std::tuple can be used instead of boost::mpl::list after boost 1.67
 using VoteItemProviders =
     boost::mpl::list<BlockProvider, ProofProvider, TxProvider>;
 
 BOOST_AUTO_TEST_CASE_TEMPLATE(voteitemupdate, P, VoteItemProviders) {
     P provider(this);
 
     std::set<VoteStatus> status{
         VoteStatus::Invalid,   VoteStatus::Rejected, VoteStatus::Accepted,
         VoteStatus::Finalized, VoteStatus::Stale,
     };
 
     auto item = provider.buildVoteItem();
 
     for (auto s : status) {
         VoteItemUpdate itemUpdate(item, s);
         // The use of BOOST_CHECK instead of BOOST_CHECK_EQUAL avoids having
         // to define operator<<() for each argument type.
         BOOST_CHECK(provider.fromAnyVoteItem(itemUpdate.getVoteItem()) == item);
         BOOST_CHECK(itemUpdate.getStatus() == s);
     }
 }
 
 namespace {
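 // Return the current response and bump its round, so that each call yields
 // the response expected at the next poll round.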
 Response next(Response &r) {
     auto copy = r;
     r = {r.getRound() + 1, r.getCooldown(), r.GetVotes()};
     return copy;
 }
 } // namespace
 
 BOOST_AUTO_TEST_CASE_TEMPLATE(item_reconcile_twice, P, VoteItemProviders) {
     P provider(this);
     ChainstateManager &chainman = *Assert(m_node.chainman);
     const CBlockIndex *chaintip =
         WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip());
 
     auto item = provider.buildVoteItem();
     auto itemid = provider.getVoteItemId(item);
 
     // Adding the item twice does nothing.
     BOOST_CHECK(addToReconcile(item));
     BOOST_CHECK(!addToReconcile(item));
     BOOST_CHECK(m_processor->isAccepted(item));
 
     // Create nodes that support avalanche so we can finalize the item.
     auto avanodes = ConnectNodes();
 
     int nextNodeIndex = 0;
     std::vector<avalanche::VoteItemUpdate> updates;
     auto registerNewVote = [&](const Response &resp) {
         runEventLoop();
         auto nodeid = avanodes[nextNodeIndex++ % avanodes.size()]->GetId();
         BOOST_CHECK(registerVotes(nodeid, resp, updates));
     };
 
     // Finalize the item.
     auto finalize = [&](const auto finalizeItemId) {
         Response resp = {getRound(), 0, {Vote(0, finalizeItemId)}};
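         // The first rounds of yes votes only ramp the item up to accepted
         // without increasing its confidence (see vote_item_register below),
         // so allow a few extra rounds on top of AVALANCHE_FINALIZATION_SCORE.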
         for (int i = 0; i < AVALANCHE_FINALIZATION_SCORE + 6; i++) {
             registerNewVote(next(resp));
             if (updates.size() > 0) {
                 break;
             }
         }
         BOOST_CHECK_EQUAL(updates.size(), 1);
         BOOST_CHECK(updates[0].getStatus() == VoteStatus::Finalized);
         updates.clear();
     };
     finalize(itemid);
 
     // The finalized item cannot be reconciled for a while.
     BOOST_CHECK(!addToReconcile(item));
 
     auto finalizeNewItem = [&]() {
         auto anotherItem = provider.buildVoteItem();
         AnyVoteItem anotherVoteItem = AnyVoteItem(anotherItem);
         auto anotherItemId = provider.getVoteItemId(anotherItem);
 
         TestVoteRecord voteRecord(AVALANCHE_FINALIZATION_SCORE - 1);
         AvalancheTest::addVoteRecord(*m_processor, anotherVoteItem, voteRecord);
         finalize(anotherItemId);
     };
 
     // The filter can have new items added up to its size and the item will
     // still not reconcile.
     for (uint32_t i = 0; i < AVALANCHE_FINALIZED_ITEMS_FILTER_NUM_ELEMENTS;
          i++) {
         finalizeNewItem();
         BOOST_CHECK(!addToReconcile(item));
     }
 
     // But if we keep going it will eventually roll out of the filter and can
     // be reconciled again.
     for (uint32_t i = 0; i < AVALANCHE_FINALIZED_ITEMS_FILTER_NUM_ELEMENTS;
          i++) {
         finalizeNewItem();
     }
 
     // Roll back the finalization point so that reconciling the old block does
     // not fail the finalization check. This is a no-op for other types.
     AvalancheTest::setFinalizationTip(*m_processor, chaintip);
 
     BOOST_CHECK(addToReconcile(item));
 }
 
 BOOST_AUTO_TEST_CASE_TEMPLATE(item_null, P, VoteItemProviders) {
     P provider(this);
 
     // Check that null case is handled on the public interface
     BOOST_CHECK(!m_processor->isAccepted(nullptr));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(nullptr), -1);
 
     auto item = decltype(provider.buildVoteItem())();
     BOOST_CHECK(item == nullptr);
     BOOST_CHECK(!addToReconcile(item));
 
     // Check that adding an item to vote on doesn't change the outcome. A
     // comparator is used under the hood, and this is skipped if there are no
     // vote records.
     item = provider.buildVoteItem();
     BOOST_CHECK(addToReconcile(item));
 
     BOOST_CHECK(!m_processor->isAccepted(nullptr));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(nullptr), -1);
 }
 
 BOOST_AUTO_TEST_CASE_TEMPLATE(vote_item_register, P, VoteItemProviders) {
     P provider(this);
     const uint32_t invType = provider.invType;
 
     auto item = provider.buildVoteItem();
     auto itemid = provider.getVoteItemId(item);
 
     // Create nodes that support avalanche.
     auto avanodes = ConnectNodes();
 
     // Querying for a random item returns false.
     BOOST_CHECK(!m_processor->isAccepted(item));
 
     // Add a new item. Check it is added to the polls.
     BOOST_CHECK(addToReconcile(item));
     auto invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, invType);
     BOOST_CHECK(invs[0].hash == itemid);
 
     BOOST_CHECK(m_processor->isAccepted(item));
 
     int nextNodeIndex = 0;
     std::vector<avalanche::VoteItemUpdate> updates;
     auto registerNewVote = [&](const Response &resp) {
         runEventLoop();
         auto nodeid = avanodes[nextNodeIndex++ % avanodes.size()]->GetId();
         BOOST_CHECK(registerVotes(nodeid, resp, updates));
     };
 
     // Let's vote for this item a few times.
     Response resp{0, 0, {Vote(0, itemid)}};
     for (int i = 0; i < 6; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(m_processor->isAccepted(item));
         BOOST_CHECK_EQUAL(m_processor->getConfidence(item), 0);
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // A single neutral vote does not change anything.
     resp = {getRound(), 0, {Vote(-1, itemid)}};
     registerNewVote(next(resp));
     BOOST_CHECK(m_processor->isAccepted(item));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(item), 0);
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     resp = {getRound(), 0, {Vote(0, itemid)}};
     for (int i = 1; i < 7; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(m_processor->isAccepted(item));
         BOOST_CHECK_EQUAL(m_processor->getConfidence(item), i);
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // Two neutral votes will stall progress.
     resp = {getRound(), 0, {Vote(-1, itemid)}};
     registerNewVote(next(resp));
     BOOST_CHECK(m_processor->isAccepted(item));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(item), 6);
     BOOST_CHECK_EQUAL(updates.size(), 0);
     registerNewVote(next(resp));
     BOOST_CHECK(m_processor->isAccepted(item));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(item), 6);
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     resp = {getRound(), 0, {Vote(0, itemid)}};
     for (int i = 2; i < 8; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(m_processor->isAccepted(item));
         BOOST_CHECK_EQUAL(m_processor->getConfidence(item), 6);
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // We vote for it numerous times to finalize it.
     for (int i = 7; i < AVALANCHE_FINALIZATION_SCORE; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(m_processor->isAccepted(item));
         BOOST_CHECK_EQUAL(m_processor->getConfidence(item), i);
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // As long as it is not finalized, we poll.
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, invType);
     BOOST_CHECK(invs[0].hash == itemid);
 
     // Now finalize the decision.
     registerNewVote(next(resp));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(provider.fromAnyVoteItem(updates[0].getVoteItem()) == item);
     BOOST_CHECK(updates[0].getStatus() == VoteStatus::Finalized);
     updates.clear();
 
     // Once the decision is finalized, there is no poll for it.
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 0);
 
     // Get a new item to vote on
     item = provider.buildVoteItem();
     itemid = provider.getVoteItemId(item);
     BOOST_CHECK(addToReconcile(item));
 
     // Now let's finalize rejection.
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, invType);
     BOOST_CHECK(invs[0].hash == itemid);
 
     resp = {getRound(), 0, {Vote(1, itemid)}};
     for (int i = 0; i < 6; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(m_processor->isAccepted(item));
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // Now the state will flip.
     registerNewVote(next(resp));
     BOOST_CHECK(!m_processor->isAccepted(item));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(provider.fromAnyVoteItem(updates[0].getVoteItem()) == item);
     BOOST_CHECK(updates[0].getStatus() == VoteStatus::Rejected);
     updates.clear();
 
     // Now it is rejected, but we can vote for it numerous times.
     for (int i = 1; i < AVALANCHE_FINALIZATION_SCORE; i++) {
         registerNewVote(next(resp));
         BOOST_CHECK(!m_processor->isAccepted(item));
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // As long as it is not finalized, we poll.
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, invType);
     BOOST_CHECK(invs[0].hash == itemid);
 
     // Now finalize the decision.
     registerNewVote(next(resp));
     BOOST_CHECK(!m_processor->isAccepted(item));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(provider.fromAnyVoteItem(updates[0].getVoteItem()) == item);
     BOOST_CHECK(updates[0].getStatus() == VoteStatus::Invalid);
     updates.clear();
 
     // Once the decision is finalized, there is no poll for it.
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 0);
 }
 
 BOOST_AUTO_TEST_CASE_TEMPLATE(multi_item_register, P, VoteItemProviders) {
     P provider(this);
     const uint32_t invType = provider.invType;
 
     auto itemA = provider.buildVoteItem();
     auto itemidA = provider.getVoteItemId(itemA);
 
     auto itemB = provider.buildVoteItem();
     auto itemidB = provider.getVoteItemId(itemB);
 
     // Create several nodes that support avalanche.
     auto avanodes = ConnectNodes();
 
     // Querying for a random item returns false.
     BOOST_CHECK(!m_processor->isAccepted(itemA));
     BOOST_CHECK(!m_processor->isAccepted(itemB));
 
     // Start voting on item A.
     BOOST_CHECK(addToReconcile(itemA));
     auto invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, invType);
     BOOST_CHECK(invs[0].hash == itemidA);
 
     uint64_t round = getRound();
     runEventLoop();
     std::vector<avalanche::VoteItemUpdate> updates;
     BOOST_CHECK(registerVotes(avanodes[0]->GetId(),
                               {round, 0, {Vote(0, itemidA)}}, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     // Start voting on item B after one vote.
     std::vector<Vote> votes = provider.buildVotesForItems(0, {itemA, itemB});
     Response resp{round + 1, 0, votes};
     BOOST_CHECK(addToReconcile(itemB));
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 2);
 
     // Ensure the inv ordering is as expected
     for (size_t i = 0; i < invs.size(); i++) {
         BOOST_CHECK_EQUAL(invs[i].type, invType);
         BOOST_CHECK(invs[i].hash == votes[i].GetHash());
     }
 
     // Let's vote for these items a few times.
     for (int i = 0; i < 4; i++) {
         NodeId nodeid = getSuitableNodeToQuery();
         runEventLoop();
         BOOST_CHECK(registerVotes(nodeid, next(resp), updates));
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // Now it is accepted, but we can vote for it numerous times.
     for (int i = 0; i < AVALANCHE_FINALIZATION_SCORE; i++) {
         NodeId nodeid = getSuitableNodeToQuery();
         runEventLoop();
         BOOST_CHECK(registerVotes(nodeid, next(resp), updates));
         BOOST_CHECK_EQUAL(updates.size(), 0);
     }
 
     // Run two iterations of the event loop so that a vote gets triggered on
     // both A and B.
     NodeId firstNodeid = getSuitableNodeToQuery();
     runEventLoop();
     NodeId secondNodeid = getSuitableNodeToQuery();
     runEventLoop();
 
     BOOST_CHECK(firstNodeid != secondNodeid);
 
     // Next vote will finalize item A.
     BOOST_CHECK(registerVotes(firstNodeid, next(resp), updates));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(provider.fromAnyVoteItem(updates[0].getVoteItem()) == itemA);
     BOOST_CHECK(updates[0].getStatus() == VoteStatus::Finalized);
     updates.clear();
 
     // We do not vote on A anymore.
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, invType);
     BOOST_CHECK(invs[0].hash == itemidB);
 
     // Next vote will finalize item B.
     BOOST_CHECK(registerVotes(secondNodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 1);
     BOOST_CHECK(provider.fromAnyVoteItem(updates[0].getVoteItem()) == itemB);
     BOOST_CHECK(updates[0].getStatus() == VoteStatus::Finalized);
     updates.clear();
 
     // There is nothing left to vote on.
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 0);
 }
 
 BOOST_AUTO_TEST_CASE_TEMPLATE(poll_and_response, P, VoteItemProviders) {
     P provider(this);
     const uint32_t invType = provider.invType;
 
     auto item = provider.buildVoteItem();
     auto itemid = provider.getVoteItemId(item);
 
     // There is no node to query.
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), NO_NODE);
 
     // Add enough nodes to have a valid quorum, and the same number with no
     // avalanche support
     std::set<NodeId> avanodeIds;
     auto avanodes = ConnectNodes();
     for (auto avanode : avanodes) {
         ConnectNode(NODE_NONE);
         avanodeIds.insert(avanode->GetId());
     }
 
     auto getSelectedAvanodeId = [&]() {
         NodeId avanodeid = getSuitableNodeToQuery();
         BOOST_CHECK(avanodeIds.find(avanodeid) != avanodeIds.end());
         return avanodeid;
     };
 
     // It returns one of the avalanche peers.
     NodeId avanodeid = getSelectedAvanodeId();
 
     // Register an item and check it is added to the list of elements to poll.
     BOOST_CHECK(addToReconcile(item));
     auto invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, invType);
     BOOST_CHECK(invs[0].hash == itemid);
 
     std::set<NodeId> unselectedNodeids = avanodeIds;
     unselectedNodeids.erase(avanodeid);
     const size_t remainingNodeIds = unselectedNodeids.size();
 
     uint64_t round = getRound();
     for (size_t i = 0; i < remainingNodeIds; i++) {
         // Trigger a poll on avanode.
         runEventLoop();
 
         // Another node is selected
         NodeId nodeid = getSuitableNodeToQuery();
         BOOST_CHECK(unselectedNodeids.find(nodeid) !=
                     unselectedNodeids.end());
         unselectedNodeids.erase(nodeid);
     }
 
     // There is no more suitable peer available, so return nothing.
     BOOST_CHECK(unselectedNodeids.empty());
     runEventLoop();
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), NO_NODE);
 
     // Respond to the request.
     Response resp = {round, 0, {Vote(0, itemid)}};
     std::vector<avalanche::VoteItemUpdate> updates;
     BOOST_CHECK(registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     // Now that avanode fulfilled its request, it is added back to the list
     // of queryable nodes.
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), avanodeid);
 
     auto checkRegisterVotesError = [&](NodeId nodeid,
                                        const avalanche::Response &response,
                                        const std::string &expectedError) {
         std::string error;
         BOOST_CHECK(!registerVotes(nodeid, response, updates, error));
         BOOST_CHECK_EQUAL(error, expectedError);
         BOOST_CHECK_EQUAL(updates.size(), 0);
     };
 
     // Sending a response when not polled fails.
     checkRegisterVotesError(avanodeid, next(resp), "unexpected-ava-response");
 
     // Trigger a poll on avanode.
     round = getRound();
     runEventLoop();
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), NO_NODE);
 
     // Sending responses that do not match the request also fails.
     // 1. Too many results.
     resp = {round, 0, {Vote(0, itemid), Vote(0, itemid)}};
     runEventLoop();
     checkRegisterVotesError(avanodeid, resp, "invalid-ava-response-size");
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), avanodeid);
 
     // 2. Not enough results.
     resp = {getRound(), 0, {}};
     runEventLoop();
     checkRegisterVotesError(avanodeid, resp, "invalid-ava-response-size");
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), avanodeid);
 
     // 3. Do not match the poll.
     resp = {getRound(), 0, {Vote()}};
     runEventLoop();
     checkRegisterVotesError(avanodeid, resp, "invalid-ava-response-content");
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), avanodeid);
 
     // At this stage we have reached the max inflight requests for our inv,
     // so it won't be requested anymore until the requests are fulfilled.
     // Let's vote on another item with no inflight request so the remaining
     // tests make sense.
     invs = getInvsForNextPoll();
     BOOST_CHECK(invs.empty());
 
     item = provider.buildVoteItem();
     itemid = provider.getVoteItemId(item);
     BOOST_CHECK(addToReconcile(item));
 
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
 
     // 4. Invalid round count. Request is not discarded.
     uint64_t queryRound = getRound();
     runEventLoop();
 
     resp = {queryRound + 1, 0, {Vote()}};
     checkRegisterVotesError(avanodeid, resp, "unexpected-ava-response");
 
     resp = {queryRound - 1, 0, {Vote()}};
     checkRegisterVotesError(avanodeid, resp, "unexpected-ava-response");
 
     // 5. Making requests for invalid nodes does not work. Request is not
     // discarded.
     resp = {queryRound, 0, {Vote(0, itemid)}};
     checkRegisterVotesError(avanodeid + 1234, resp, "unexpected-ava-response");
 
     // Proper response gets processed and avanode is available again.
     resp = {queryRound, 0, {Vote(0, itemid)}};
     BOOST_CHECK(registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), avanodeid);
 
     // Out-of-order responses are rejected.
     const auto item2 = provider.buildVoteItem();
     BOOST_CHECK(addToReconcile(item2));
 
     std::vector<Vote> votes = provider.buildVotesForItems(0, {item, item2});
     resp = {getRound(), 0, {votes[1], votes[0]}};
     runEventLoop();
     checkRegisterVotesError(avanodeid, resp, "invalid-ava-response-content");
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), avanodeid);
 
     // But they are accepted in order.
     resp = {getRound(), 0, votes};
     runEventLoop();
     BOOST_CHECK(registerVotes(avanodeid, resp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), avanodeid);
 }
 
 BOOST_AUTO_TEST_CASE_TEMPLATE(dont_poll_invalid_item, P, VoteItemProviders) {
     P provider(this);
     const uint32_t invType = provider.invType;
 
     auto itemA = provider.buildVoteItem();
     auto itemB = provider.buildVoteItem();
 
     auto avanodes = ConnectNodes();
 
     // Build votes to get proper ordering
     std::vector<Vote> votes = provider.buildVotesForItems(0, {itemA, itemB});
 
     // Register the items and check they are added to the list of elements to
     // poll.
     BOOST_CHECK(addToReconcile(itemA));
     BOOST_CHECK(addToReconcile(itemB));
     auto invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 2);
     for (size_t i = 0; i < invs.size(); i++) {
         BOOST_CHECK_EQUAL(invs[i].type, invType);
         BOOST_CHECK(invs[i].hash == votes[i].GetHash());
     }
 
     // When an item is marked invalid, stop polling.
     provider.invalidateItem(itemB);
 
     Response goodResp{getRound(), 0, {Vote(0, provider.getVoteItemId(itemA))}};
     std::vector<avalanche::VoteItemUpdate> updates;
     runEventLoop();
     BOOST_CHECK(registerVotes(avanodes[0]->GetId(), goodResp, updates));
     BOOST_CHECK_EQUAL(updates.size(), 0);
 
     // Votes including itemB are rejected
     Response badResp{getRound(), 0, votes};
     runEventLoop();
     std::string error;
     BOOST_CHECK(!registerVotes(avanodes[1]->GetId(), badResp, updates, error));
     BOOST_CHECK_EQUAL(error, "invalid-ava-response-size");
 }
 
 BOOST_TEST_DECORATOR(*boost::unit_test::timeout(60))
 BOOST_AUTO_TEST_CASE_TEMPLATE(poll_inflight_timeout, P, VoteItemProviders) {
     P provider(this);
     ChainstateManager &chainman = *Assert(m_node.chainman);
 
     auto queryTimeDuration = std::chrono::milliseconds(10);
     setArg("-avatimeout", ToString(queryTimeDuration.count()));
 
     bilingual_str error;
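     // Rebuild the processor so the new -avatimeout value is picked up: the
     // arguments are read when the processor is created.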
     m_processor = Processor::MakeProcessor(
         *m_node.args, *m_node.chain, m_node.connman.get(), chainman,
         m_node.mempool.get(), *m_node.scheduler, error);
 
     const auto item = provider.buildVoteItem();
     const auto itemid = provider.getVoteItemId(item);
 
     // Add the item
     BOOST_CHECK(addToReconcile(item));
 
     // Create a quorum of nodes that support avalanche.
     ConnectNodes();
     NodeId avanodeid = NO_NODE;
 
     // Expire requests after some time.
     for (int i = 0; i < 10; i++) {
         Response resp = {getRound(), 0, {Vote(0, itemid)}};
         avanodeid = getSuitableNodeToQuery();
 
         auto start = Now<SteadyMilliseconds>();
         runEventLoop();
         // We cannot guarantee that we'll wait for just 1ms, so we have to bail
         // if we aren't within the proper time range.
         std::this_thread::sleep_for(std::chrono::milliseconds(1));
         runEventLoop();
 
         std::vector<avalanche::VoteItemUpdate> updates;
         bool ret = registerVotes(avanodeid, next(resp), updates);
         if (Now<SteadyMilliseconds>() > start + queryTimeDuration) {
             // We waited for too long, bail. Because we can't know for sure when
             // previous steps ran, ret is not deterministic and we do not check
             // it.
             i--;
             continue;
         }
 
         // We are within time bounds, so the vote should have worked.
         BOOST_CHECK(ret);
 
         avanodeid = getSuitableNodeToQuery();
 
         // Now try again but wait for expiration.
         runEventLoop();
         std::this_thread::sleep_for(queryTimeDuration);
         runEventLoop();
         BOOST_CHECK(!registerVotes(avanodeid, next(resp), updates));
     }
 }
 
 BOOST_AUTO_TEST_CASE_TEMPLATE(poll_inflight_count, P, VoteItemProviders) {
     P provider(this);
     const uint32_t invType = provider.invType;
 
     // Create enough nodes so that we run into the inflight request limit.
     auto proof = GetProof();
     BOOST_CHECK(m_processor->withPeerManager(
         [&](avalanche::PeerManager &pm) { return pm.registerProof(proof); }));
 
     std::array<CNode *, AVALANCHE_MAX_INFLIGHT_POLL + 1> nodes;
     for (auto &n : nodes) {
         n = ConnectNode(NODE_AVALANCHE);
         BOOST_CHECK(addNode(n->GetId(), proof->getId()));
     }
 
     // Add an item to poll
     const auto item = provider.buildVoteItem();
     const auto itemid = provider.getVoteItemId(item);
     BOOST_CHECK(addToReconcile(item));
 
     // Ensure there are enough requests in flight.
     std::map<NodeId, uint64_t> node_round_map;
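     // Remember which round each node was polled in, so a matching response
     // can be sent back later.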
     for (int i = 0; i < AVALANCHE_MAX_INFLIGHT_POLL; i++) {
         NodeId nodeid = getSuitableNodeToQuery();
         BOOST_CHECK(node_round_map.find(nodeid) == node_round_map.end());
         node_round_map.insert(std::pair<NodeId, uint64_t>(nodeid, getRound()));
         auto invs = getInvsForNextPoll();
         BOOST_CHECK_EQUAL(invs.size(), 1);
         BOOST_CHECK_EQUAL(invs[0].type, invType);
         BOOST_CHECK(invs[0].hash == itemid);
         runEventLoop();
     }
 
     // Now that we have enough in-flight requests, we shouldn't poll.
     auto suitablenodeid = getSuitableNodeToQuery();
     BOOST_CHECK(suitablenodeid != NO_NODE);
     auto invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 0);
     runEventLoop();
     BOOST_CHECK_EQUAL(getSuitableNodeToQuery(), suitablenodeid);
 
     // Send one response, now we can poll again.
     auto it = node_round_map.begin();
     Response resp = {it->second, 0, {Vote(0, itemid)}};
     std::vector<avalanche::VoteItemUpdate> updates;
     BOOST_CHECK(registerVotes(it->first, resp, updates));
     node_round_map.erase(it);
 
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, invType);
     BOOST_CHECK(invs[0].hash == itemid);
 }
 
 BOOST_AUTO_TEST_CASE(quorum_diversity) {
     std::vector<VoteItemUpdate> updates;
 
     CBlock block = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHash = block.GetHash();
     const CBlockIndex *pindex;
     {
         LOCK(cs_main);
         pindex =
             Assert(m_node.chainman)->m_blockman.LookupBlockIndex(blockHash);
     }
 
     // Create nodes that support avalanche.
     auto avanodes = ConnectNodes();
 
     // Querying for a random block returns false.
     BOOST_CHECK(!m_processor->isAccepted(pindex));
 
     // Add a new block. Check it is added to the polls.
     BOOST_CHECK(m_processor->addToReconcile(pindex));
 
     // Do one valid round of voting.
     uint64_t round = getRound();
     Response resp{round, 0, {Vote(0, blockHash)}};
 
     // Check that all nodes can vote.
     for (size_t i = 0; i < avanodes.size(); i++) {
         runEventLoop();
         BOOST_CHECK(registerVotes(avanodes[i]->GetId(), next(resp), updates));
     }
 
     // Generate a query for every single node.
     const NodeId firstNodeId = getSuitableNodeToQuery();
     std::map<NodeId, uint64_t> node_round_map;
     round = getRound();
     for (size_t i = 0; i < avanodes.size(); i++) {
         NodeId nodeid = getSuitableNodeToQuery();
         BOOST_CHECK(node_round_map.find(nodeid) == node_round_map.end());
         node_round_map[nodeid] = getRound();
         runEventLoop();
     }
 
     // Now only the first node can vote. All others would be duplicates in
     // the quorum.
     auto confidence = m_processor->getConfidence(pindex);
     BOOST_REQUIRE(confidence > 0);
 
     for (auto &[nodeid, r] : node_round_map) {
         if (nodeid == firstNodeId) {
             // Node 0 is the only one which can vote at this stage.
             round = r;
             continue;
         }
 
         BOOST_CHECK(
             registerVotes(nodeid, {r, 0, {Vote(0, blockHash)}}, updates));
         BOOST_CHECK_EQUAL(m_processor->getConfidence(pindex), confidence);
     }
 
     BOOST_CHECK(
         registerVotes(firstNodeId, {round, 0, {Vote(0, blockHash)}}, updates));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(pindex), confidence + 1);
 }
 
 BOOST_AUTO_TEST_CASE(event_loop) {
     CScheduler s;
 
     CBlock block = CreateAndProcessBlock({}, CScript());
     const BlockHash blockHash = block.GetHash();
     const CBlockIndex *pindex;
     {
         LOCK(cs_main);
         pindex =
             Assert(m_node.chainman)->m_blockman.LookupBlockIndex(blockHash);
     }
 
     // Starting the event loop.
     BOOST_CHECK(m_processor->startEventLoop(s));
 
     // There is one task planned in the next hour (our event loop).
     std::chrono::steady_clock::time_point start, stop;
     BOOST_CHECK_EQUAL(s.getQueueInfo(start, stop), 1);
 
     // Starting twice doesn't start it twice.
     BOOST_CHECK(!m_processor->startEventLoop(s));
 
     // Start the scheduler thread.
     std::thread schedulerThread(std::bind(&CScheduler::serviceQueue, &s));
 
     // Create a quorum of nodes that support avalanche.
     auto avanodes = ConnectNodes();
 
     // There is no query in flight at the moment.
     NodeId nodeid = getSuitableNodeToQuery();
     BOOST_CHECK_NE(nodeid, NO_NODE);
 
     // Add a new block. Check it is added to the polls.
     uint64_t queryRound = getRound();
     BOOST_CHECK(m_processor->addToReconcile(pindex));
 
     // Wait until all nodes got a poll
     for (int i = 0; i < 60 * 1000; i++) {
         // Technically, this is a race condition, but this should do just fine
         // as we wait up to 1 minute for an event that should take 80ms.
         UninterruptibleSleep(std::chrono::milliseconds(1));
         if (getRound() == queryRound + avanodes.size()) {
             break;
         }
     }
 
     // Check that we effectively got a request and did not time out.
     BOOST_CHECK(getRound() > queryRound);
 
     // Respond and check the cooldown time is respected.
     uint64_t responseRound = getRound();
     auto queryTime = Now<SteadyMilliseconds>() + std::chrono::milliseconds(100);
 
     std::vector<VoteItemUpdate> updates;
     // Only the first node answers, so it's the only one that gets polled again
     BOOST_CHECK(registerVotes(nodeid, {queryRound, 100, {Vote(0, blockHash)}},
                               updates));
 
     for (int i = 0; i < 10000; i++) {
         // We make sure that we do not get a request before queryTime.
         UninterruptibleSleep(std::chrono::milliseconds(1));
         if (getRound() != responseRound) {
             BOOST_CHECK(Now<SteadyMilliseconds>() >= queryTime);
             break;
         }
     }
 
     // But we eventually get one.
     BOOST_CHECK(getRound() > responseRound);
 
     // Stop event loop.
     BOOST_CHECK(m_processor->stopEventLoop());
 
     // We don't have any task scheduled anymore.
     BOOST_CHECK_EQUAL(s.getQueueInfo(start, stop), 0);
 
     // Can't stop the event loop twice.
     BOOST_CHECK(!m_processor->stopEventLoop());
 
     // Wait for the scheduler to stop.
     s.StopWhenDrained();
     schedulerThread.join();
 }
 
 BOOST_AUTO_TEST_CASE(destructor) {
     CScheduler s;
     std::chrono::steady_clock::time_point start, stop;
 
     std::thread schedulerThread;
     BOOST_CHECK(m_processor->startEventLoop(s));
     BOOST_CHECK_EQUAL(s.getQueueInfo(start, stop), 1);
 
     // Start the service thread after the queue size check to prevent a race
     // condition where the thread may be processing the event loop task during
     // the check.
     schedulerThread = std::thread(std::bind(&CScheduler::serviceQueue, &s));
 
     // Destroy the processor.
     m_processor.reset();
 
     // Now that avalanche is destroyed, there are no more scheduled tasks.
     BOOST_CHECK_EQUAL(s.getQueueInfo(start, stop), 0);
 
     // Wait for the scheduler to stop.
     s.StopWhenDrained();
     schedulerThread.join();
 }
 
 BOOST_AUTO_TEST_CASE(add_proof_to_reconcile) {
     uint32_t score = MIN_VALID_PROOF_SCORE;
     Chainstate &active_chainstate = Assert(m_node.chainman)->ActiveChainstate();
 
     auto addProofToReconcile = [&](uint32_t proofScore) {
         auto proof = buildRandomProof(active_chainstate, proofScore);
         m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
             BOOST_CHECK(pm.registerProof(proof));
         });
         BOOST_CHECK(m_processor->addToReconcile(proof));
         return proof;
     };
 
     for (size_t i = 0; i < AVALANCHE_MAX_ELEMENT_POLL; i++) {
         auto proof = addProofToReconcile(++score);
 
         auto invs = AvalancheTest::getInvsForNextPoll(*m_processor);
         BOOST_CHECK_EQUAL(invs.size(), i + 1);
         BOOST_CHECK(invs.front().IsMsgProof());
         BOOST_CHECK_EQUAL(invs.front().hash, proof->getId());
     }
 
     // From here a new proof is only polled if its score is in the top
     // AVALANCHE_MAX_ELEMENT_POLL
     ProofId lastProofId;
     for (size_t i = 0; i < 10; i++) {
         auto proof = addProofToReconcile(++score);
 
         auto invs = AvalancheTest::getInvsForNextPoll(*m_processor);
         BOOST_CHECK_EQUAL(invs.size(), AVALANCHE_MAX_ELEMENT_POLL);
         BOOST_CHECK(invs.front().IsMsgProof());
         BOOST_CHECK_EQUAL(invs.front().hash, proof->getId());
 
         lastProofId = proof->getId();
     }
 
     for (size_t i = 0; i < 10; i++) {
         auto proof = addProofToReconcile(--score);
 
         auto invs = AvalancheTest::getInvsForNextPoll(*m_processor);
         BOOST_CHECK_EQUAL(invs.size(), AVALANCHE_MAX_ELEMENT_POLL);
         BOOST_CHECK(invs.front().IsMsgProof());
         BOOST_CHECK_EQUAL(invs.front().hash, lastProofId);
     }
 
     {
         // The score is not high enough to get polled
         auto proof = addProofToReconcile(--score);
         auto invs = AvalancheTest::getInvsForNextPoll(*m_processor);
         for (auto &inv : invs) {
             BOOST_CHECK_NE(inv.hash, proof->getId());
         }
     }
 }
 
 BOOST_AUTO_TEST_CASE(proof_record) {
     setArg("-avaproofstakeutxoconfirmations", "2");
     setArg("-avalancheconflictingproofcooldown", "0");
 
     BOOST_CHECK(!m_processor->isAccepted(nullptr));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(nullptr), -1);
 
     const CKey key = CKey::MakeCompressedKey();
 
     const COutPoint conflictingOutpoint{TxId(GetRandHash()), 0};
     const COutPoint immatureOutpoint{TxId(GetRandHash()), 0};
     {
         CScript script = GetScriptForDestination(PKHash(key.GetPubKey()));
 
         LOCK(cs_main);
         CCoinsViewCache &coins =
             Assert(m_node.chainman)->ActiveChainstate().CoinsTip();
         coins.AddCoin(conflictingOutpoint,
                       Coin(CTxOut(PROOF_DUST_THRESHOLD, script), 10, false),
                       false);
         coins.AddCoin(immatureOutpoint,
                       Coin(CTxOut(PROOF_DUST_THRESHOLD, script), 100, false),
                       false);
     }
 
     auto buildProof = [&](const COutPoint &outpoint, uint64_t sequence,
                           uint32_t height = 10) {
         ProofBuilder pb(sequence, 0, key, UNSPENDABLE_ECREG_PAYOUT_SCRIPT);
         BOOST_CHECK(
             pb.addUTXO(outpoint, PROOF_DUST_THRESHOLD, height, false, key));
         return pb.build();
     };
 
     auto conflictingProof = buildProof(conflictingOutpoint, 1);
     auto validProof = buildProof(conflictingOutpoint, 2);
     auto immatureProof = buildProof(immatureOutpoint, 3, 100);
 
     BOOST_CHECK(!m_processor->isAccepted(conflictingProof));
     BOOST_CHECK(!m_processor->isAccepted(validProof));
     BOOST_CHECK(!m_processor->isAccepted(immatureProof));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(conflictingProof), -1);
     BOOST_CHECK_EQUAL(m_processor->getConfidence(validProof), -1);
     BOOST_CHECK_EQUAL(m_processor->getConfidence(immatureProof), -1);
 
     // Reconciling proofs that don't exist will fail
     BOOST_CHECK(!m_processor->addToReconcile(conflictingProof));
     BOOST_CHECK(!m_processor->addToReconcile(validProof));
     BOOST_CHECK(!m_processor->addToReconcile(immatureProof));
 
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK(pm.registerProof(conflictingProof));
         BOOST_CHECK(pm.registerProof(validProof));
         BOOST_CHECK(!pm.registerProof(immatureProof));
 
         BOOST_CHECK(pm.isBoundToPeer(validProof->getId()));
         BOOST_CHECK(pm.isInConflictingPool(conflictingProof->getId()));
         BOOST_CHECK(pm.isImmature(immatureProof->getId()));
     });
 
     BOOST_CHECK(m_processor->addToReconcile(conflictingProof));
     BOOST_CHECK(!m_processor->isAccepted(conflictingProof));
     BOOST_CHECK(!m_processor->isAccepted(validProof));
     BOOST_CHECK(!m_processor->isAccepted(immatureProof));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(conflictingProof), 0);
     BOOST_CHECK_EQUAL(m_processor->getConfidence(validProof), -1);
     BOOST_CHECK_EQUAL(m_processor->getConfidence(immatureProof), -1);
 
     BOOST_CHECK(m_processor->addToReconcile(validProof));
     BOOST_CHECK(!m_processor->isAccepted(conflictingProof));
     BOOST_CHECK(m_processor->isAccepted(validProof));
     BOOST_CHECK(!m_processor->isAccepted(immatureProof));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(conflictingProof), 0);
     BOOST_CHECK_EQUAL(m_processor->getConfidence(validProof), 0);
     BOOST_CHECK_EQUAL(m_processor->getConfidence(immatureProof), -1);
 
     BOOST_CHECK(!m_processor->addToReconcile(immatureProof));
     BOOST_CHECK(!m_processor->isAccepted(conflictingProof));
     BOOST_CHECK(m_processor->isAccepted(validProof));
     BOOST_CHECK(!m_processor->isAccepted(immatureProof));
     BOOST_CHECK_EQUAL(m_processor->getConfidence(conflictingProof), 0);
     BOOST_CHECK_EQUAL(m_processor->getConfidence(validProof), 0);
     BOOST_CHECK_EQUAL(m_processor->getConfidence(immatureProof), -1);
 }
 
 BOOST_AUTO_TEST_CASE(quorum_detection) {
     // Set min quorum parameters for our test
     int minStake = 400'000'000;
     setArg("-avaminquorumstake", ToString(minStake));
     setArg("-avaminquorumconnectedstakeratio", "0.5");
 
     // Create a new processor with our given quorum parameters
     const auto currency = Currency::get();
     uint32_t minScore = Proof::amountToScore(minStake * currency.baseunit);
 
     Chainstate &active_chainstate = Assert(m_node.chainman)->ActiveChainstate();
 
     const CKey key = CKey::MakeCompressedKey();
     auto localProof =
         buildRandomProof(active_chainstate, minScore / 4, 100, key);
     setArg("-avamasterkey", EncodeSecret(key));
     setArg("-avaproof", localProof->ToHex());
 
     bilingual_str error;
     ChainstateManager &chainman = *Assert(m_node.chainman);
     m_processor = Processor::MakeProcessor(
         *m_node.args, *m_node.chain, m_node.connman.get(), chainman,
         m_node.mempool.get(), *m_node.scheduler, error);
 
     BOOST_CHECK(m_processor != nullptr);
     BOOST_CHECK(m_processor->getLocalProof() != nullptr);
     BOOST_CHECK_EQUAL(m_processor->getLocalProof()->getId(),
                       localProof->getId());
     BOOST_CHECK_EQUAL(AvalancheTest::getMinQuorumScore(*m_processor), minScore);
     BOOST_CHECK_EQUAL(
         AvalancheTest::getMinQuorumConnectedScoreRatio(*m_processor), 0.5);
 
     // The local proof has not been validated yet
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), 0);
         BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), 0);
     });
     BOOST_CHECK(!m_processor->isQuorumEstablished());
 
     // Register the local proof. This is normally done when the chain tip is
     // updated. The local proof should be accounted for in the min quorum
     // computation but the peer manager doesn't know about that.
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK(pm.registerProof(m_processor->getLocalProof()));
         BOOST_CHECK(pm.isBoundToPeer(m_processor->getLocalProof()->getId()));
         BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), minScore / 4);
         BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), 0);
     });
     BOOST_CHECK(!m_processor->isQuorumEstablished());
 
     // Add enough nodes to get a conclusive vote
     for (NodeId id = 0; id < 8; id++) {
         m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
             pm.addNode(id, m_processor->getLocalProof()->getId());
             BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), minScore / 4);
             BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), minScore / 4);
         });
     }
 
     // Add part of the required stake and make sure we still report no quorum
     auto proof1 = buildRandomProof(active_chainstate, minScore / 2);
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK(pm.registerProof(proof1));
         BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), 3 * minScore / 4);
         BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), minScore / 4);
     });
     BOOST_CHECK(!m_processor->isQuorumEstablished());
 
     // Add the rest of the stake, but we are still lacking connected stake
     const int64_t tipTime =
         WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip())
             ->GetBlockTime();
     const COutPoint utxo{TxId(GetRandHash()), 0};
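     // Stake amount covering the remaining quarter of the quorum score,
     // inverting the amountToScore() mapping used above (one coin of stake
     // appears to be worth 100 score units).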
     const Amount amount = (int64_t(minScore / 4) * COIN) / 100;
     const int height = 100;
     const bool isCoinbase = false;
     {
         LOCK(cs_main);
         CCoinsViewCache &coins = active_chainstate.CoinsTip();
         coins.AddCoin(utxo,
                       Coin(CTxOut(amount, GetScriptForDestination(
                                               PKHash(key.GetPubKey()))),
                            height, isCoinbase),
                       false);
     }
     ProofBuilder pb(1, tipTime + 1, key, UNSPENDABLE_ECREG_PAYOUT_SCRIPT);
     BOOST_CHECK(pb.addUTXO(utxo, amount, height, isCoinbase, key));
     auto proof2 = pb.build();
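    // The amount above is chosen as the assumed inverse of the score mapping
    // (amount = score * COIN / 100), so proof2 is worth exactly minScore / 4
    // and the total score will reach minScore once it is registered.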
 
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK(pm.registerProof(proof2));
         BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), minScore);
         BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), minScore / 4);
     });
     BOOST_CHECK(!m_processor->isQuorumEstablished());
 
     // Adding a node should cause the quorum to be detected and locked-in
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         pm.addNode(8, proof2->getId());
         BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), minScore);
         // The peer manager knows that proof2 has a node attached ...
         BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), minScore / 2);
     });
    // ... but the processor also accounts for the local proof, so we reach 50%
     BOOST_CHECK(m_processor->isQuorumEstablished());
 
     // Go back to not having enough connected score, but we've already latched
     // the quorum as established
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         pm.removeNode(8);
         BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), minScore);
         BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), minScore / 4);
     });
     BOOST_CHECK(m_processor->isQuorumEstablished());
 
     // Removing one more node drops our count below the minimum and the quorum
     // is no longer ready
     m_processor->withPeerManager(
         [&](avalanche::PeerManager &pm) { pm.removeNode(7); });
     BOOST_CHECK(!m_processor->isQuorumEstablished());
 
     // It resumes when we have enough nodes again
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         pm.addNode(7, m_processor->getLocalProof()->getId());
     });
     BOOST_CHECK(m_processor->isQuorumEstablished());
 
     // Remove peers one at a time until the quorum is no longer established
     auto spendProofUtxo = [&](ProofRef proof) {
         {
             LOCK(cs_main);
             CCoinsViewCache &coins = chainman.ActiveChainstate().CoinsTip();
             coins.SpendCoin(proof->getStakes()[0].getStake().getUTXO());
         }
         m_processor->withPeerManager([&proof](avalanche::PeerManager &pm) {
             pm.updatedBlockTip();
             BOOST_CHECK(!pm.isBoundToPeer(proof->getId()));
         });
     };
 
     // Expire proof2, the quorum is still latched
     for (int64_t i = 0; i < 6; i++) {
         SetMockTime(proof2->getExpirationTime() + i);
         CreateAndProcessBlock({}, CScript());
     }
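    // Proof expiration is assumed to be checked against the tip's median time
    // past (computed over the last 11 blocks), so several blocks with
    // increasing mock times are mined before the MTP catches up with proof2's
    // expiration time, as verified below.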
     BOOST_CHECK_EQUAL(
         WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip())
             ->GetMedianTimePast(),
         proof2->getExpirationTime());
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         pm.updatedBlockTip();
         BOOST_CHECK(!pm.exists(proof2->getId()));
     });
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), 3 * minScore / 4);
         BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), minScore / 4);
     });
     BOOST_CHECK(m_processor->isQuorumEstablished());
 
     spendProofUtxo(proof1);
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), minScore / 4);
         BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), minScore / 4);
     });
     BOOST_CHECK(m_processor->isQuorumEstablished());
 
     spendProofUtxo(m_processor->getLocalProof());
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK_EQUAL(pm.getTotalPeersScore(), 0);
         BOOST_CHECK_EQUAL(pm.getConnectedPeersScore(), 0);
     });
     // There is no node left
     BOOST_CHECK(!m_processor->isQuorumEstablished());
 }
 
 BOOST_AUTO_TEST_CASE(quorum_detection_parameter_validation) {
     // Create vector of tuples of:
     // <min stake, min ratio, min avaproofs messages, success bool>
     const std::vector<std::tuple<std::string, std::string, std::string, bool>>
         testCases = {
             // All parameters are invalid
             {"", "", "", false},
             {"-1", "-1", "-1", false},
 
             // Min stake is out of range
             {"-1", "0", "0", false},
             {"-0.01", "0", "0", false},
             {"21000000000000.01", "0", "0", false},
 
             // Min connected ratio is out of range
             {"0", "-1", "0", false},
             {"0", "1.1", "0", false},
 
             // Min avaproofs messages ratio is out of range
             {"0", "0", "-1", false},
 
             // All parameters are valid
             {"0", "0", "0", true},
             {"0.00", "0", "0", true},
             {"0.01", "0", "0", true},
             {"1", "0.1", "0", true},
             {"10", "0.5", "0", true},
             {"10", "1", "0", true},
             {"21000000000000.00", "0", "0", true},
             {"0", "0", "1", true},
             {"0", "0", "100", true},
         };
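    // The stake bounds above are assumed to mirror the money supply:
    // 21000000000000 coins is the maximum money amount, so anything above it
    // (or negative) is rejected, and the connected ratio must lie in [0, 1].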
 
    // For each case, set the parameters and check that making the processor
    // succeeds or fails as expected
     for (const auto &[stake, stakeRatio, numProofsMessages, success] :
          testCases) {
         setArg("-avaminquorumstake", stake);
         setArg("-avaminquorumconnectedstakeratio", stakeRatio);
         setArg("-avaminavaproofsnodecount", numProofsMessages);
 
         bilingual_str error;
         std::unique_ptr<Processor> processor = Processor::MakeProcessor(
             *m_node.args, *m_node.chain, m_node.connman.get(),
             *Assert(m_node.chainman), m_node.mempool.get(), *m_node.scheduler,
             error);
 
         if (success) {
             BOOST_CHECK(processor != nullptr);
             BOOST_CHECK(error.empty());
             BOOST_CHECK_EQUAL(error.original, "");
         } else {
             BOOST_CHECK(processor == nullptr);
             BOOST_CHECK(!error.empty());
             BOOST_CHECK(error.original != "");
         }
     }
 }
 
 BOOST_AUTO_TEST_CASE(min_avaproofs_messages) {
     ChainstateManager &chainman = *Assert(m_node.chainman);
 
     auto checkMinAvaproofsMessages = [&](int64_t minAvaproofsMessages) {
         setArg("-avaminavaproofsnodecount", ToString(minAvaproofsMessages));
 
         bilingual_str error;
         auto processor = Processor::MakeProcessor(
             *m_node.args, *m_node.chain, m_node.connman.get(), chainman,
             m_node.mempool.get(), *m_node.scheduler, error);
 
         auto addNode = [&](NodeId nodeid) {
             auto proof = buildRandomProof(chainman.ActiveChainstate(),
                                           MIN_VALID_PROOF_SCORE);
             processor->withPeerManager([&](avalanche::PeerManager &pm) {
                 BOOST_CHECK(pm.registerProof(proof));
                 BOOST_CHECK(pm.addNode(nodeid, proof->getId()));
             });
         };
 
        // Add enough nodes to have a conclusive vote, but don't account for
        // any avaproofs.
        // NOTE: we can't use the test facilities like ConnectNodes() because
        // we are not testing on m_processor.
         for (NodeId id = 100; id < 108; id++) {
             addNode(id);
         }
 
         BOOST_CHECK_EQUAL(processor->isQuorumEstablished(),
                           minAvaproofsMessages <= 0);
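        // Nodes added via addNode() count toward connected stake but are
        // assumed not to count toward the avaproofs counter; only
        // avaproofsSent() increments it, so a positive minAvaproofsMessages
        // keeps the quorum unestablished until enough messages are accounted
        // for below.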
 
         for (int64_t i = 0; i < minAvaproofsMessages - 1; i++) {
             addNode(i);
 
             processor->avaproofsSent(i);
             BOOST_CHECK_EQUAL(processor->getAvaproofsNodeCounter(), i + 1);
 
             // Receiving again on the same node does not increase the counter
             processor->avaproofsSent(i);
             BOOST_CHECK_EQUAL(processor->getAvaproofsNodeCounter(), i + 1);
 
             BOOST_CHECK(!processor->isQuorumEstablished());
         }
 
         addNode(minAvaproofsMessages);
         processor->avaproofsSent(minAvaproofsMessages);
         BOOST_CHECK(processor->isQuorumEstablished());
 
         // Check the latch
         AvalancheTest::clearavaproofsNodeCounter(*processor);
         BOOST_CHECK(processor->isQuorumEstablished());
     };
 
     checkMinAvaproofsMessages(0);
     checkMinAvaproofsMessages(1);
     checkMinAvaproofsMessages(10);
     checkMinAvaproofsMessages(100);
 }
 
 BOOST_AUTO_TEST_CASE_TEMPLATE(voting_parameters, P, VoteItemProviders) {
     // Check that setting voting parameters has the expected effect
     setArg("-avastalevotethreshold",
            ToString(AVALANCHE_VOTE_STALE_MIN_THRESHOLD));
     setArg("-avastalevotefactor", "2");
 
     const std::vector<std::tuple<int, int>> testCases = {
         // {number of yes votes, number of neutral votes}
         {0, AVALANCHE_VOTE_STALE_MIN_THRESHOLD},
         {AVALANCHE_FINALIZATION_SCORE + 4, AVALANCHE_FINALIZATION_SCORE - 6},
     };
 
     bilingual_str error;
     m_processor = Processor::MakeProcessor(
         *m_node.args, *m_node.chain, m_node.connman.get(),
         *Assert(m_node.chainman), m_node.mempool.get(), *m_node.scheduler,
         error);
 
     BOOST_CHECK(m_processor != nullptr);
     BOOST_CHECK(error.empty());
 
     P provider(this);
     const uint32_t invType = provider.invType;
 
     const auto item = provider.buildVoteItem();
     const auto itemid = provider.getVoteItemId(item);
 
    // Create nodes that support avalanche.
     auto avanodes = ConnectNodes();
     int nextNodeIndex = 0;
 
     std::vector<avalanche::VoteItemUpdate> updates;
     for (const auto &[numYesVotes, numNeutralVotes] : testCases) {
         // Add a new item. Check it is added to the polls.
         BOOST_CHECK(addToReconcile(item));
         auto invs = getInvsForNextPoll();
         BOOST_CHECK_EQUAL(invs.size(), 1);
         BOOST_CHECK_EQUAL(invs[0].type, invType);
         BOOST_CHECK(invs[0].hash == itemid);
 
         BOOST_CHECK(m_processor->isAccepted(item));
 
         auto registerNewVote = [&](const Response &resp) {
             runEventLoop();
             auto nodeid = avanodes[nextNodeIndex++ % avanodes.size()]->GetId();
             BOOST_CHECK(registerVotes(nodeid, resp, updates));
         };
 
         // Add some confidence
         for (int i = 0; i < numYesVotes; i++) {
             Response resp = {getRound(), 0, {Vote(0, itemid)}};
             registerNewVote(next(resp));
             BOOST_CHECK(m_processor->isAccepted(item));
             BOOST_CHECK_EQUAL(m_processor->getConfidence(item),
                               i >= 6 ? i - 5 : 0);
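            // The expected confidence is inferred from the values above:
            // confidence only starts accruing after 6 consecutive yes votes
            // (presumably once the vote record's recent-vote window agrees),
            // hence i - 5 from the 7th round onward.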
             BOOST_CHECK_EQUAL(updates.size(), 0);
         }
 
         // Vote until just before item goes stale
         for (int i = 0; i < numNeutralVotes; i++) {
             Response resp = {getRound(), 0, {Vote(-1, itemid)}};
             registerNewVote(next(resp));
             BOOST_CHECK_EQUAL(updates.size(), 0);
         }
 
         // As long as it is not stale, we poll.
         invs = getInvsForNextPoll();
         BOOST_CHECK_EQUAL(invs.size(), 1);
         BOOST_CHECK_EQUAL(invs[0].type, invType);
         BOOST_CHECK(invs[0].hash == itemid);
 
         // Now stale
         Response resp = {getRound(), 0, {Vote(-1, itemid)}};
         registerNewVote(next(resp));
         BOOST_CHECK_EQUAL(updates.size(), 1);
         BOOST_CHECK(provider.fromAnyVoteItem(updates[0].getVoteItem()) == item);
         BOOST_CHECK(updates[0].getStatus() == VoteStatus::Stale);
         updates.clear();
 
         // Once stale, there is no poll for it.
         invs = getInvsForNextPoll();
         BOOST_CHECK_EQUAL(invs.size(), 0);
     }
 }
 
 BOOST_AUTO_TEST_CASE(block_vote_finalization_tip) {
     BlockProvider provider(this);
 
     std::vector<CBlockIndex *> blockIndexes;
     for (size_t i = 0; i < AVALANCHE_MAX_ELEMENT_POLL; i++) {
         CBlockIndex *pindex = provider.buildVoteItem();
         BOOST_CHECK(addToReconcile(pindex));
         blockIndexes.push_back(pindex);
     }
 
     auto invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), AVALANCHE_MAX_ELEMENT_POLL);
     for (size_t i = 0; i < AVALANCHE_MAX_ELEMENT_POLL; i++) {
         BOOST_CHECK_EQUAL(
             invs[i].hash,
             blockIndexes[AVALANCHE_MAX_ELEMENT_POLL - i - 1]->GetBlockHash());
     }
 
    // Build a vote vector where only the 11th block is accepted and the
    // others are unknown.
     const BlockHash eleventhBlockHash =
         blockIndexes[AVALANCHE_MAX_ELEMENT_POLL - 10 - 1]->GetBlockHash();
     std::vector<Vote> votes;
     votes.reserve(AVALANCHE_MAX_ELEMENT_POLL);
     for (size_t i = AVALANCHE_MAX_ELEMENT_POLL; i > 0; i--) {
         BlockHash blockhash = blockIndexes[i - 1]->GetBlockHash();
         votes.emplace_back(blockhash == eleventhBlockHash ? 0 : -1, blockhash);
     }
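    // Vote error codes as used in this test: 0 means the item is accepted,
    // -1 is treated as an unknown/neutral answer, and other non-zero values
    // (like the 1 used further down) count as a rejection.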
 
     auto avanodes = ConnectNodes();
     int nextNodeIndex = 0;
 
     std::vector<avalanche::VoteItemUpdate> updates;
     auto registerNewVote = [&]() {
         Response resp = {getRound(), 0, votes};
         runEventLoop();
         auto nodeid = avanodes[nextNodeIndex++ % avanodes.size()]->GetId();
         BOOST_CHECK(registerVotes(nodeid, resp, updates));
     };
 
     // Vote for the blocks until the one being accepted finalizes
     bool eleventhBlockFinalized = false;
     for (size_t i = 0; i < 10000 && !eleventhBlockFinalized; i++) {
         registerNewVote();
 
         for (auto &update : updates) {
             if (update.getStatus() == VoteStatus::Finalized &&
                 provider.fromAnyVoteItem(update.getVoteItem())
                         ->GetBlockHash() == eleventhBlockHash) {
                 eleventhBlockFinalized = true;
             }
         }
     }
     BOOST_CHECK(eleventhBlockFinalized);
 
    // From now on, only the 10 blocks with the most work are polled for
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 10);
     for (size_t i = 0; i < 10; i++) {
         BOOST_CHECK_EQUAL(
             invs[i].hash,
             blockIndexes[AVALANCHE_MAX_ELEMENT_POLL - i - 1]->GetBlockHash());
     }
 
     // Adding ancestor blocks to reconcile will fail
     for (size_t i = 0; i < AVALANCHE_MAX_ELEMENT_POLL - 10 - 1; i++) {
         BOOST_CHECK(!addToReconcile(blockIndexes[i]));
     }
 
    // Create a couple of concurrent chain tips
     CBlockIndex *tip = provider.buildVoteItem();
 
     auto &activeChainstate = m_node.chainman->ActiveChainstate();
     BlockValidationState state;
     activeChainstate.InvalidateBlock(state, tip);
 
     // Use another script to make sure we don't generate the same block again
     CBlock altblock = CreateAndProcessBlock({}, CScript() << OP_TRUE);
     auto alttip = WITH_LOCK(
         cs_main, return Assert(m_node.chainman)
                      ->m_blockman.LookupBlockIndex(altblock.GetHash()));
     BOOST_CHECK(alttip);
     BOOST_CHECK(alttip->pprev == tip->pprev);
     BOOST_CHECK(alttip->GetBlockHash() != tip->GetBlockHash());
 
     // Reconsider the previous tip valid, so we have concurrent tip candidates
     {
         LOCK(cs_main);
         activeChainstate.ResetBlockFailureFlags(tip);
     }
     activeChainstate.ActivateBestChain(state);
 
     BOOST_CHECK(addToReconcile(tip));
     BOOST_CHECK(addToReconcile(alttip));
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 12);
 
     // Vote for the tip until it finalizes
     BlockHash tiphash = tip->GetBlockHash();
     votes.clear();
     votes.reserve(12);
     for (auto &inv : invs) {
         votes.emplace_back(inv.hash == tiphash ? 0 : -1, inv.hash);
     }
 
     bool tipFinalized = false;
     for (size_t i = 0; i < 10000 && !tipFinalized; i++) {
         registerNewVote();
 
         for (auto &update : updates) {
             if (update.getStatus() == VoteStatus::Finalized &&
                 provider.fromAnyVoteItem(update.getVoteItem())
                         ->GetBlockHash() == tiphash) {
                 tipFinalized = true;
             }
         }
     }
     BOOST_CHECK(tipFinalized);
 
    // Now the tip and all its ancestors will be removed from polls. Only the
    // alttip remains because it is on a forked chain, so we want to keep
    // polling for it until it's invalidated or goes stale.
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].hash, alttip->GetBlockHash());
 
     // Cannot reconcile a finalized block
     BOOST_CHECK(!addToReconcile(tip));
 
     // Vote for alttip until it invalidates
     BlockHash alttiphash = alttip->GetBlockHash();
     votes = {{1, alttiphash}};
 
     bool alttipInvalidated = false;
     for (size_t i = 0; i < 10000 && !alttipInvalidated; i++) {
         registerNewVote();
 
         for (auto &update : updates) {
             if (update.getStatus() == VoteStatus::Invalid &&
                 provider.fromAnyVoteItem(update.getVoteItem())
                         ->GetBlockHash() == alttiphash) {
                 alttipInvalidated = true;
             }
         }
     }
     BOOST_CHECK(alttipInvalidated);
     invs = getInvsForNextPoll();
     BOOST_CHECK_EQUAL(invs.size(), 0);
 
     // Cannot reconcile an invalidated block
     BOOST_CHECK(!addToReconcile(alttip));
 }
 
 BOOST_AUTO_TEST_CASE(vote_map_comparator) {
     ChainstateManager &chainman = *Assert(m_node.chainman);
     Chainstate &activeChainState = chainman.ActiveChainstate();
 
     const int numberElementsEachType = 100;
     FastRandomContext rng;
 
     std::vector<ProofRef> proofs;
     for (size_t i = 1; i <= numberElementsEachType; i++) {
         auto proof =
             buildRandomProof(activeChainState, i * MIN_VALID_PROOF_SCORE);
         BOOST_CHECK(proof != nullptr);
         proofs.emplace_back(std::move(proof));
     }
     Shuffle(proofs.begin(), proofs.end(), rng);
 
     std::vector<CBlockIndex> indexes;
     for (size_t i = 1; i <= numberElementsEachType; i++) {
         CBlockIndex index;
         index.nChainWork = i;
         indexes.emplace_back(std::move(index));
     }
     Shuffle(indexes.begin(), indexes.end(), rng);
 
     CTxMemPool *mempool = Assert(m_node.mempool.get());
     TestMemPoolEntryHelper mempoolEntryHelper;
     std::vector<CTransactionRef> txs;
     for (size_t i = 1; i <= numberElementsEachType; i++) {
         CMutableTransaction mtx;
         mtx.nVersion = 2;
         mtx.vin.emplace_back(COutPoint{TxId(rng.rand256()), 0});
         mtx.vout.emplace_back(1000 * COIN, CScript() << OP_TRUE);
 
         CTransactionRef tx = MakeTransactionRef(std::move(mtx));
 
         auto entry = mempoolEntryHelper.Fee(int64_t(i) * COIN).FromTx(tx);
         {
             LOCK2(cs_main, mempool->cs);
             mempool->addUnchecked(entry);
             BOOST_CHECK(mempool->exists(tx->GetId()));
         }
 
         txs.emplace_back(std::move(tx));
     }
 
     auto allItems =
         std::make_tuple(std::move(proofs), std::move(indexes), std::move(txs));
     static const size_t numTypes = std::tuple_size<decltype(allItems)>::value;
 
     RWCollection<VoteMap> voteMap(VoteMap(m_node.mempool.get()));
 
     {
         auto writeView = voteMap.getWriteView();
         for (size_t i = 0; i < numberElementsEachType; i++) {
             // Randomize the insert order at each loop increment
             const size_t firstType = rng.randrange(numTypes);
 
             for (size_t j = 0; j < numTypes; j++) {
                 switch ((firstType + j) % numTypes) {
                     // ProofRef
                     case 0:
                         writeView->insert(std::make_pair(
                             std::get<0>(allItems)[i], VoteRecord(true)));
                         break;
                     // CBlockIndex *
                     case 1:
                         writeView->insert(std::make_pair(
                             &std::get<1>(allItems)[i], VoteRecord(true)));
                         break;
                     // CTransactionRef
                     case 2:
                         writeView->insert(std::make_pair(
                             std::get<2>(allItems)[i], VoteRecord(true)));
                         break;
                     default:
                         break;
                 }
             }
         }
     }
 
     {
         // Check ordering
         auto readView = voteMap.getReadView();
         auto it = readView.begin();
 
         // The first batch of items is the proofs ordered by score
         // (descending)
         uint32_t lastScore = std::numeric_limits<uint32_t>::max();
         for (size_t i = 0; i < numberElementsEachType; i++) {
             BOOST_CHECK(std::holds_alternative<const ProofRef>(it->first));
 
             uint32_t currentScore =
                 std::get<const ProofRef>(it->first)->getScore();
             BOOST_CHECK_LT(currentScore, lastScore);
             lastScore = currentScore;
 
             it++;
         }
 
         // The next batch of items is the block indexes ordered by work
         // (descending)
         arith_uint256 lastWork = ~arith_uint256(0);
         for (size_t i = 0; i < numberElementsEachType; i++) {
             BOOST_CHECK(std::holds_alternative<const CBlockIndex *>(it->first));
 
             arith_uint256 currentWork =
                 std::get<const CBlockIndex *>(it->first)->nChainWork;
             BOOST_CHECK(currentWork < lastWork);
             lastWork = currentWork;
 
             it++;
         }
 
        // The last batch of items is the txs ordered by modified fee rate
        // (descending)
         CFeeRate lastFeeRate{MAX_MONEY};
         {
             LOCK(mempool->cs);
 
             for (size_t i = 0; i < numberElementsEachType; i++) {
                 BOOST_CHECK(
                     std::holds_alternative<const CTransactionRef>(it->first));
 
                 auto iter = mempool->GetIter(
                     std::get<const CTransactionRef>(it->first)->GetId());
                 BOOST_CHECK(iter.has_value());
 
                 CFeeRate currentFeeRate = (**iter)->GetModifiedFeeRate();
 
                 BOOST_CHECK(currentFeeRate < lastFeeRate);
                 lastFeeRate = currentFeeRate;
 
                 it++;
             }
         }
 
         BOOST_CHECK(it == readView.end());
     }
 }
 
 BOOST_AUTO_TEST_CASE(vote_map_tx_comparator) {
     CTxMemPool *mempool = Assert(m_node.mempool.get());
     TestMemPoolEntryHelper mempoolEntryHelper;
     TxProvider provider(this);
 
     std::vector<CTransactionRef> txs;
     for (size_t i = 0; i < 5; i++) {
         txs.emplace_back(provider.buildVoteItem());
     }
 
     {
         // When there is no mempool, the txs are sorted by txid
         RWCollection<VoteMap> voteMap(VoteMap(nullptr));
         {
             auto writeView = voteMap.getWriteView();
             for (const auto &tx : txs) {
                 writeView->insert(std::make_pair(tx, VoteRecord(true)));
             }
         }
 
         auto readView = voteMap.getReadView();
         TxId lastTxId{uint256::ZERO};
         for (const auto &[item, vote] : readView) {
             auto tx = std::get<const CTransactionRef>(item);
             BOOST_CHECK_GT(tx->GetId(), lastTxId);
             lastTxId = tx->GetId();
         }
     }
 
    // Remove the first 5 txs from the mempool, and add 5 more
     mempool->clear();
     for (size_t i = 0; i < 5; i++) {
         txs.emplace_back(provider.buildVoteItem());
     }
 
     {
         RWCollection<VoteMap> voteMap((VoteMap(mempool)));
 
         {
             auto writeView = voteMap.getWriteView();
             for (const auto &tx : txs) {
                 writeView->insert(std::make_pair(tx, VoteRecord(true)));
             }
         }
 
         auto readView = voteMap.getReadView();
         auto it = readView.begin();
 
         LOCK(mempool->cs);
 
        // The first 5 txs are still in the mempool and are sorted by modified
        // fee rate
         CFeeRate lastFeeRate{MAX_MONEY};
         for (size_t i = 0; i < 5; i++) {
             auto tx = std::get<const CTransactionRef>(it->first);
 
             auto iter = mempool->GetIter(tx->GetId());
             BOOST_CHECK(iter.has_value());
 
             BOOST_CHECK((**iter)->GetModifiedFeeRate() <= lastFeeRate);
             lastFeeRate = (**iter)->GetModifiedFeeRate();
             it++;
         }
 
        // The last 5 txs are no longer in the mempool and are sorted by txid
         TxId lastTxId{uint256::ZERO};
         for (size_t i = 0; i < 5; i++) {
             auto tx = std::get<const CTransactionRef>(it->first);
 
             BOOST_CHECK(!mempool->exists(tx->GetId()));
 
             BOOST_CHECK_GT(tx->GetId(), lastTxId);
             lastTxId = tx->GetId();
             it++;
         }
     }
 }
 
 BOOST_AUTO_TEST_CASE(block_reconcile_initial_vote) {
     auto &chainman = Assert(m_node.chainman);
     Chainstate &chainstate = chainman->ActiveChainstate();
 
     const auto block = std::make_shared<const CBlock>(
         this->CreateBlock({}, CScript(), chainstate));
     const BlockHash blockhash = block->GetHash();
 
     BlockValidationState state;
     CBlockIndex *blockindex;
     {
         LOCK(cs_main);
         BOOST_CHECK(chainstate.AcceptBlock(block, state,
                                            /*fRequested=*/true, /*dbp=*/nullptr,
                                            /*fNewBlock=*/nullptr,
                                            /*min_pow_checked=*/true));
 
         blockindex = chainman->m_blockman.LookupBlockIndex(blockhash);
         BOOST_CHECK(blockindex);
     }
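    // AcceptBlock() stores the block and creates its index entry but does not
    // connect it to the active chain; that is deferred to the
    // ActivateBestChain() call below.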
 
     // ActivateBestChain() interacts with g_avalanche, so make it happy
     g_avalanche = std::move(m_processor);
 
     // The block is not connected yet, and not added to the poll list yet
     BOOST_CHECK(AvalancheTest::getInvsForNextPoll(*g_avalanche).empty());
     BOOST_CHECK(!g_avalanche->isAccepted(blockindex));
 
     // Call ActivateBestChain to connect the new block
     BOOST_CHECK(chainstate.ActivateBestChain(state, block));
     // It is a valid block so the tip is updated
     BOOST_CHECK_EQUAL(chainstate.m_chain.Tip(), blockindex);
 
     // Check the block is added to the poll
     auto invs = AvalancheTest::getInvsForNextPoll(*g_avalanche);
     BOOST_CHECK_EQUAL(invs.size(), 1);
     BOOST_CHECK_EQUAL(invs[0].type, MSG_BLOCK);
     BOOST_CHECK_EQUAL(invs[0].hash, blockhash);
 
     // This block is our new tip so we should vote "yes"
     BOOST_CHECK(g_avalanche->isAccepted(blockindex));
 
     // Prevent a data race between UpdatedBlockTip and the Processor destructor
     SyncWithValidationInterfaceQueue();
 
     g_avalanche.reset(nullptr);
 }
 
 BOOST_AUTO_TEST_CASE(compute_staking_rewards) {
     auto now = GetTime<std::chrono::seconds>();
     SetMockTime(now);
 
     // Pick in the middle
     BlockHash prevBlockHash{uint256::ZERO};
 
     std::vector<CScript> winners;
 
     BOOST_CHECK(!m_processor->getStakingRewardWinners(prevBlockHash, winners));
 
     // Null index
     BOOST_CHECK(!m_processor->computeStakingReward(nullptr));
     BOOST_CHECK(!m_processor->getStakingRewardWinners(prevBlockHash, winners));
 
     CBlockIndex prevBlock;
     prevBlock.phashBlock = &prevBlockHash;
     prevBlock.nHeight = 100;
     prevBlock.nTime = now.count();
 
     // No quorum
     BOOST_CHECK(!m_processor->computeStakingReward(&prevBlock));
     BOOST_CHECK(!m_processor->getStakingRewardWinners(prevBlockHash, winners));
 
     setArg("-avaminquorumstake", "0");
     setArg("-avaminquorumconnectedstakeratio", "0");
     setArg("-avaminavaproofsnodecount", "0");
 
    // Set up a bunch of proofs
     size_t numProofs = 10;
     std::vector<ProofRef> proofs;
     proofs.reserve(numProofs);
     for (size_t i = 0; i < numProofs; i++) {
         const CKey key = CKey::MakeCompressedKey();
         CScript payoutScript = GetScriptForRawPubKey(key.GetPubKey());
 
         auto proof = GetProof(payoutScript);
         m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
             BOOST_CHECK(pm.registerProof(proof));
             BOOST_CHECK(pm.addNode(i, proof->getId()));
             // Finalize the proof
             BOOST_CHECK(pm.forPeer(proof->getId(), [&](const Peer peer) {
                 return pm.setFinalized(peer.peerid);
             }));
         });
 
         proofs.emplace_back(std::move(proof));
     }
 
     BOOST_CHECK(m_processor->isQuorumEstablished());
 
     // Proofs are too recent so we still have no winner
     BOOST_CHECK(!m_processor->computeStakingReward(&prevBlock));
     BOOST_CHECK(!m_processor->getStakingRewardWinners(prevBlockHash, winners));
 
     // Make sure we picked a payout script from one of our proofs
     auto winnerExists = [&](const CScript &expectedWinner) {
         const std::string winnerString = FormatScript(expectedWinner);
 
         for (const ProofRef &proof : proofs) {
             if (winnerString == FormatScript(proof->getPayoutScript())) {
                 return true;
             }
         }
         return false;
     };
 
     // Elapse some time
     now += 1h + 1s;
     SetMockTime(now);
     prevBlock.nTime = now.count();
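    // Moving the mock time (and the previous block time) forward by more than
    // an hour makes the finalized proofs look old enough to be eligible; the
    // minimum proof age for staking rewards is assumed to be about an hour
    // here.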
 
     // Now we successfully inserted a winner in our map
     BOOST_CHECK(m_processor->computeStakingReward(&prevBlock));
     BOOST_CHECK(m_processor->getStakingRewardWinners(prevBlockHash, winners));
     BOOST_CHECK(winnerExists(winners[0]));
 
     // Subsequent calls are a no-op
     BOOST_CHECK(m_processor->computeStakingReward(&prevBlock));
     BOOST_CHECK(m_processor->getStakingRewardWinners(prevBlockHash, winners));
     BOOST_CHECK(winnerExists(winners[0]));
 
     CBlockIndex prevBlockHigh = prevBlock;
     BlockHash prevBlockHashHigh =
         BlockHash(ArithToUint256({std::numeric_limits<uint64_t>::max()}));
     prevBlockHigh.phashBlock = &prevBlockHashHigh;
     prevBlockHigh.nHeight = 101;
     BOOST_CHECK(m_processor->computeStakingReward(&prevBlockHigh));
     BOOST_CHECK(
         m_processor->getStakingRewardWinners(prevBlockHashHigh, winners));
     BOOST_CHECK(winnerExists(winners[0]));
 
     // No impact on previous winner so far
     BOOST_CHECK(m_processor->getStakingRewardWinners(prevBlockHash, winners));
     BOOST_CHECK(winnerExists(winners[0]));
 
     // Cleanup to height 101
     m_processor->cleanupStakingRewards(101);
 
     // Now the previous winner has been cleared
     BOOST_CHECK(!m_processor->getStakingRewardWinners(prevBlockHash, winners));
 
    // But the last one remains
     BOOST_CHECK(
         m_processor->getStakingRewardWinners(prevBlockHashHigh, winners));
     BOOST_CHECK(winnerExists(winners[0]));
 
     // We can add it again
     BOOST_CHECK(m_processor->computeStakingReward(&prevBlock));
     BOOST_CHECK(m_processor->getStakingRewardWinners(prevBlockHash, winners));
     BOOST_CHECK(winnerExists(winners[0]));
 
     // Cleanup to higher height
     m_processor->cleanupStakingRewards(200);
 
     // No winner anymore
     BOOST_CHECK(!m_processor->getStakingRewardWinners(prevBlockHash, winners));
     BOOST_CHECK(
         !m_processor->getStakingRewardWinners(prevBlockHashHigh, winners));
 }
 
 BOOST_AUTO_TEST_CASE(local_proof_status) {
     const CKey key = CKey::MakeCompressedKey();
 
     const COutPoint outpoint{TxId(GetRandHash()), 0};
     {
         CScript script = GetScriptForDestination(PKHash(key.GetPubKey()));
 
         LOCK(cs_main);
         CCoinsViewCache &coins =
             Assert(m_node.chainman)->ActiveChainstate().CoinsTip();
         coins.AddCoin(outpoint,
                       Coin(CTxOut(PROOF_DUST_THRESHOLD, script), 100, false),
                       false);
     }
 
     auto buildProof = [&](const COutPoint &outpoint, uint64_t sequence,
                           uint32_t height) {
         ProofBuilder pb(sequence, 0, key, UNSPENDABLE_ECREG_PAYOUT_SCRIPT);
         BOOST_CHECK(
             pb.addUTXO(outpoint, PROOF_DUST_THRESHOLD, height, false, key));
         return pb.build();
     };
 
     auto localProof = buildProof(outpoint, 1, 100);
 
     setArg("-avamasterkey", EncodeSecret(key));
     setArg("-avaproof", localProof->ToHex());
     setArg("-avalancheconflictingproofcooldown", "0");
     setArg("-avalanchepeerreplacementcooldown", "0");
     setArg("-avaproofstakeutxoconfirmations", "3");
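    // With -avaproofstakeutxoconfirmations=3, the stake UTXO added at height
    // 100 needs two more blocks on top before the proof matures, which is why
    // two blocks are mined further down before registration succeeds.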
 
     bilingual_str error;
     ChainstateManager &chainman = *Assert(m_node.chainman);
     m_processor = Processor::MakeProcessor(
         *m_node.args, *m_node.chain, m_node.connman.get(), chainman,
         m_node.mempool.get(), *m_node.scheduler, error);
 
     BOOST_CHECK_EQUAL(m_processor->getLocalProof()->getId(),
                       localProof->getId());
 
     auto checkLocalProofState =
         [&](const bool boundToPeer,
             const ProofRegistrationResult expectedResult) {
             BOOST_CHECK_EQUAL(
                 m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
                     return pm.isBoundToPeer(localProof->getId());
                 }),
                 boundToPeer);
             BOOST_CHECK_MESSAGE(
                 m_processor->getLocalProofRegistrationState().GetResult() ==
                     expectedResult,
                 m_processor->getLocalProofRegistrationState().ToString());
         };
 
     checkLocalProofState(false, ProofRegistrationResult::NONE);
 
     // Not ready to share, the local proof isn't registered
     BOOST_CHECK(!m_processor->canShareLocalProof());
     AvalancheTest::updatedBlockTip(*m_processor);
     checkLocalProofState(false, ProofRegistrationResult::NONE);
 
     // Ready to share, but the proof is immature
     AvalancheTest::setLocalProofShareable(*m_processor, true);
     BOOST_CHECK(m_processor->canShareLocalProof());
     AvalancheTest::updatedBlockTip(*m_processor);
     checkLocalProofState(false, ProofRegistrationResult::IMMATURE);
 
    // Mine a block to re-evaluate the proof; it remains immature
     mineBlocks(1);
     AvalancheTest::updatedBlockTip(*m_processor);
     checkLocalProofState(false, ProofRegistrationResult::IMMATURE);
 
     // One more block and the proof turns mature
     mineBlocks(1);
     AvalancheTest::updatedBlockTip(*m_processor);
     checkLocalProofState(true, ProofRegistrationResult::NONE);
 
     // Build a conflicting proof and check the status is updated accordingly
     auto conflictingProof = buildProof(outpoint, 2, 100);
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK(pm.registerProof(conflictingProof));
         BOOST_CHECK(pm.isBoundToPeer(conflictingProof->getId()));
         BOOST_CHECK(pm.isInConflictingPool(localProof->getId()));
     });
     AvalancheTest::updatedBlockTip(*m_processor);
     checkLocalProofState(false, ProofRegistrationResult::CONFLICTING);
 }
 
 BOOST_AUTO_TEST_CASE(reconcileOrFinalize) {
     setArg("-avalancheconflictingproofcooldown", "0");
     setArg("-avalanchepeerreplacementcooldown", "0");
 
     // Proof is null
     BOOST_CHECK(!m_processor->reconcileOrFinalize(ProofRef()));
 
     ChainstateManager &chainman = *Assert(m_node.chainman);
     Chainstate &activeChainState = chainman.ActiveChainstate();
 
     const CKey key = CKey::MakeCompressedKey();
     const COutPoint outpoint{TxId(GetRandHash()), 0};
     {
         CScript script = GetScriptForDestination(PKHash(key.GetPubKey()));
 
         LOCK(cs_main);
         CCoinsViewCache &coins = activeChainState.CoinsTip();
         coins.AddCoin(outpoint,
                       Coin(CTxOut(PROOF_DUST_THRESHOLD, script), 100, false),
                       false);
     }
 
     auto buildProof = [&](const COutPoint &outpoint, uint64_t sequence) {
         ProofBuilder pb(sequence, 0, key, UNSPENDABLE_ECREG_PAYOUT_SCRIPT);
         BOOST_CHECK(
             pb.addUTXO(outpoint, PROOF_DUST_THRESHOLD, 100, false, key));
         return pb.build();
     };
 
     auto proof = buildProof(outpoint, 1);
     BOOST_CHECK(proof);
 
     // Not a peer nor conflicting
     BOOST_CHECK(!m_processor->reconcileOrFinalize(proof));
 
     // Register the proof
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK(pm.registerProof(proof));
         BOOST_CHECK(pm.isBoundToPeer(proof->getId()));
         BOOST_CHECK(!pm.isInConflictingPool(proof->getId()));
     });
 
     // Reconcile works
     BOOST_CHECK(m_processor->reconcileOrFinalize(proof));
     // Repeated calls fail and do nothing
     BOOST_CHECK(!m_processor->reconcileOrFinalize(proof));
 
     // Finalize
     AvalancheTest::addProofToRecentfinalized(*m_processor, proof->getId());
     BOOST_CHECK(m_processor->isRecentlyFinalized(proof->getId()));
     BOOST_CHECK(m_processor->reconcileOrFinalize(proof));
 
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         // The peer is marked as final
         BOOST_CHECK(pm.forPeer(proof->getId(), [&](const Peer &peer) {
             return peer.hasFinalized;
         }));
         BOOST_CHECK(pm.isBoundToPeer(proof->getId()));
         BOOST_CHECK(!pm.isInConflictingPool(proof->getId()));
     });
 
     // Same proof with a higher sequence number
     auto betterProof = buildProof(outpoint, 2);
     BOOST_CHECK(betterProof);
 
     // Not registered nor conflicting yet
     BOOST_CHECK(!m_processor->reconcileOrFinalize(betterProof));
 
     m_processor->withPeerManager([&](avalanche::PeerManager &pm) {
         BOOST_CHECK(pm.registerProof(betterProof));
         BOOST_CHECK(pm.isBoundToPeer(betterProof->getId()));
         BOOST_CHECK(!pm.isInConflictingPool(betterProof->getId()));
 
         BOOST_CHECK(!pm.isBoundToPeer(proof->getId()));
         BOOST_CHECK(pm.isInConflictingPool(proof->getId()));
     });
 
     // Recently finalized, not worth polling
     BOOST_CHECK(!m_processor->reconcileOrFinalize(proof));
     // But the better proof can be polled
     BOOST_CHECK(m_processor->reconcileOrFinalize(betterProof));
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/init.cpp b/src/init.cpp
index b232983f9a..6f2345d8a2 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1,2920 +1,2921 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2018 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #if defined(HAVE_CONFIG_H)
 #include <config/bitcoin-config.h>
 #endif
 
 #include <init.h>
 
 #include <kernel/mempool_persist.h>
 #include <kernel/validation_cache_sizes.h>
 
 #include <addrman.h>
 #include <avalanche/avalanche.h>
 #include <avalanche/processor.h>
 #include <avalanche/proof.h> // For AVALANCHE_LEGACY_PROOF_DEFAULT
 #include <avalanche/validation.h>
 #include <avalanche/voterecord.h> // For AVALANCHE_VOTE_STALE_*
 #include <banman.h>
 #include <blockfilter.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <common/args.h>
 #include <compat/sanity.h>
 #include <config.h>
 #include <consensus/amount.h>
 #include <currencyunit.h>
 #include <flatfile.h>
 #include <hash.h>
 #include <httprpc.h>
 #include <httpserver.h>
 #include <index/blockfilterindex.h>
 #include <index/coinstatsindex.h>
 #include <index/txindex.h>
 #include <init/common.h>
 #include <interfaces/chain.h>
 #include <interfaces/node.h>
 #include <mapport.h>
 #include <mempool_args.h>
 #include <net.h>
 #include <net_permissions.h>
 #include <net_processing.h>
 #include <netbase.h>
 #include <node/blockmanager_args.h>
 #include <node/blockstorage.h>
 #include <node/caches.h>
 #include <node/chainstate.h>
 #include <node/chainstatemanager_args.h>
 #include <node/context.h>
 #include <node/kernel_notifications.h>
 #include <node/mempool_persist_args.h>
 #include <node/miner.h>
 #include <node/ui_interface.h>
 #include <node/validation_cache_args.h>
 #include <policy/policy.h>
 #include <policy/settings.h>
 #include <rpc/blockchain.h>
 #include <rpc/register.h>
 #include <rpc/server.h>
 #include <rpc/util.h>
 #include <scheduler.h>
 #include <script/scriptcache.h>
 #include <script/sigcache.h>
 #include <script/standard.h>
 #include <shutdown.h>
 #include <sync.h>
 #include <timedata.h>
 #include <torcontrol.h>
 #include <txdb.h>
 #include <txmempool.h>
 #include <util/asmap.h>
 #include <util/check.h>
 #include <util/fs.h>
 #include <util/fs_helpers.h>
 #include <util/moneystr.h>
 #include <util/string.h>
 #include <util/syserror.h>
 #include <util/thread.h>
 #include <util/threadnames.h>
 #include <util/translation.h>
 #include <validation.h>
 #include <validationinterface.h>
 #include <walletinitinterface.h>
 
 #include <boost/signals2/signal.hpp>
 
 #if ENABLE_CHRONIK
 #include <chronik-cpp/chronik.h>
 #endif
 
 #if ENABLE_ZMQ
 #include <zmq/zmqabstractnotifier.h>
 #include <zmq/zmqnotificationinterface.h>
 #include <zmq/zmqrpc.h>
 #endif
 
 #ifndef WIN32
 #include <cerrno>
 #include <csignal>
 #include <sys/stat.h>
 #endif
 #include <algorithm>
 #include <condition_variable>
 #include <cstdint>
 #include <cstdio>
 #include <fstream>
 #include <functional>
 #include <set>
 #include <string>
 #include <thread>
 #include <vector>
 
 using kernel::DEFAULT_STOPAFTERBLOCKIMPORT;
 using kernel::DumpMempool;
 using kernel::ValidationCacheSizes;
 
 using node::ApplyArgsManOptions;
 using node::BlockManager;
 using node::CacheSizes;
 using node::CalculateCacheSizes;
 using node::DEFAULT_PERSIST_MEMPOOL;
 using node::fReindex;
 using node::KernelNotifications;
 using node::LoadChainstate;
 using node::MempoolPath;
 using node::NodeContext;
 using node::ShouldPersistMempool;
 using node::ThreadImport;
 using node::VerifyLoadedChainstate;
 
 static const bool DEFAULT_PROXYRANDOMIZE = true;
 static const bool DEFAULT_REST_ENABLE = false;
 static constexpr bool DEFAULT_CHRONIK = false;
 
 #ifdef WIN32
 // Win32 LevelDB doesn't use filedescriptors, and the ones used for accessing
 // block files don't count towards the fd_set size limit anyway.
 #define MIN_CORE_FILEDESCRIPTORS 0
 #else
 #define MIN_CORE_FILEDESCRIPTORS 150
 #endif
 
 static const char *DEFAULT_ASMAP_FILENAME = "ip_asn.map";
 
 /**
  * The PID file facilities.
  */
 static const char *BITCOIN_PID_FILENAME = "bitcoind.pid";
 
 static fs::path GetPidFile(const ArgsManager &args) {
     return AbsPathForConfigVal(args,
                                args.GetPathArg("-pid", BITCOIN_PID_FILENAME));
 }
 
 [[nodiscard]] static bool CreatePidFile(const ArgsManager &args) {
     std::ofstream file{GetPidFile(args)};
     if (file) {
 #ifdef WIN32
         tfm::format(file, "%d\n", GetCurrentProcessId());
 #else
         tfm::format(file, "%d\n", getpid());
 #endif
         return true;
     } else {
         return InitError(strprintf(_("Unable to create the PID file '%s': %s"),
                                    fs::PathToString(GetPidFile(args)),
                                    SysErrorString(errno)));
     }
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // Shutdown
 //
 
 //
 // Thread management and startup/shutdown:
 //
 // The network-processing threads are all part of a thread group created by
 // AppInit() or the Qt main() function.
 //
// A clean exit happens when StartShutdown() or the SIGTERM signal handler sets
// fRequestShutdown, which makes the main thread's WaitForShutdown() interrupt
// the thread group.
// WaitForShutdown() then makes all other ongoing threads in the thread group
// join the main thread.
 // Shutdown() is then called to clean up database connections, and stop other
 // threads that should only be stopped after the main network-processing threads
 // have exited.
 //
 // Shutdown for Qt is very similar, only it uses a QTimer to detect
 // ShutdownRequested() getting set, and then does the normal Qt shutdown thing.
 //
 
 void Interrupt(NodeContext &node) {
     InterruptHTTPServer();
     InterruptHTTPRPC();
     InterruptRPC();
     InterruptREST();
     InterruptTorControl();
     InterruptMapPort();
     if (g_avalanche) {
         // Avalanche needs to be stopped before we interrupt the thread group as
         // the scheduler will stop working then.
         g_avalanche->stopEventLoop();
     }
     if (node.connman) {
         node.connman->Interrupt();
     }
     if (g_txindex) {
         g_txindex->Interrupt();
     }
     ForEachBlockFilterIndex([](BlockFilterIndex &index) { index.Interrupt(); });
     if (g_coin_stats_index) {
         g_coin_stats_index->Interrupt();
     }
 }
 
 void Shutdown(NodeContext &node) {
     static Mutex g_shutdown_mutex;
     TRY_LOCK(g_shutdown_mutex, lock_shutdown);
     if (!lock_shutdown) {
         return;
     }
     LogPrintf("%s: In progress...\n", __func__);
     Assert(node.args);
 
     /// Note: Shutdown() must be able to handle cases in which initialization
     /// failed part of the way, for example if the data directory was found to
     /// be locked. Be sure that anything that writes files or flushes caches
     /// only does this if the respective module was initialized.
     util::ThreadRename("shutoff");
     if (node.mempool) {
         node.mempool->AddTransactionsUpdated(1);
     }
 
     StopHTTPRPC();
     StopREST();
     StopRPC();
     StopHTTPServer();
     for (const auto &client : node.chain_clients) {
         client->flush();
     }
     StopMapPort();
 
     // Because avalanche and the network depend on each other, it is important
     // to shut them down in this order:
     // 1. Stop avalanche event loop.
     // 2. Shutdown network processing.
     // 3. Destroy avalanche::Processor.
     // 4. Destroy CConnman
     if (g_avalanche) {
         g_avalanche->stopEventLoop();
     }
 
    // Because these depend on each other, we make sure that neither can be
    // using the other before destroying them.
     if (node.peerman) {
         UnregisterValidationInterface(node.peerman.get());
     }
     if (node.connman) {
         node.connman->Stop();
     }
 
     StopTorControl();
 
     // After everything has been shut down, but before things get flushed, stop
     // the CScheduler/checkqueue, scheduler and load block thread.
     if (node.scheduler) {
         node.scheduler->stop();
     }
     if (node.chainman && node.chainman->m_load_block.joinable()) {
         node.chainman->m_load_block.join();
     }
     StopScriptCheckWorkerThreads();
 
     // After the threads that potentially access these pointers have been
     // stopped, destruct and reset all to nullptr.
     node.peerman.reset();
 
     // Destroy various global instances
     g_avalanche.reset();
     node.connman.reset();
     node.banman.reset();
     node.addrman.reset();
 
     if (node.mempool && node.mempool->GetLoadTried() &&
         ShouldPersistMempool(*node.args)) {
         DumpMempool(*node.mempool, MempoolPath(*node.args));
     }
 
     // FlushStateToDisk generates a ChainStateFlushed callback, which we should
     // avoid missing
     if (node.chainman) {
         LOCK(cs_main);
         for (Chainstate *chainstate : node.chainman->GetAll()) {
             if (chainstate->CanFlushToDisk()) {
                 chainstate->ForceFlushStateToDisk();
             }
         }
     }
 
     // After there are no more peers/RPC left to give us new data which may
     // generate CValidationInterface callbacks, flush them...
     GetMainSignals().FlushBackgroundCallbacks();
 
 #if ENABLE_CHRONIK
     if (node.args->GetBoolArg("-chronik", DEFAULT_CHRONIK)) {
         chronik::Stop();
     }
 #endif
 
     // Stop and delete all indexes only after flushing background callbacks.
     if (g_txindex) {
         g_txindex->Stop();
         g_txindex.reset();
     }
     if (g_coin_stats_index) {
         g_coin_stats_index->Stop();
         g_coin_stats_index.reset();
     }
     ForEachBlockFilterIndex([](BlockFilterIndex &index) { index.Stop(); });
     DestroyAllBlockFilterIndexes();
 
     // Any future callbacks will be dropped. This should absolutely be safe - if
     // missing a callback results in an unrecoverable situation, unclean
     // shutdown would too. The only reason to do the above flushes is to let the
     // wallet catch up with our current chain to avoid any strange pruning edge
     // cases and make next startup faster by avoiding rescan.
 
     if (node.chainman) {
         LOCK(cs_main);
         for (Chainstate *chainstate : node.chainman->GetAll()) {
             if (chainstate->CanFlushToDisk()) {
                 chainstate->ForceFlushStateToDisk();
                 chainstate->ResetCoinsViews();
             }
         }
     }
     for (const auto &client : node.chain_clients) {
         client->stop();
     }
 
 #if ENABLE_ZMQ
     if (g_zmq_notification_interface) {
         UnregisterValidationInterface(g_zmq_notification_interface.get());
         g_zmq_notification_interface.reset();
     }
 #endif
 
     node.chain_clients.clear();
     UnregisterAllValidationInterfaces();
     GetMainSignals().UnregisterBackgroundSignalScheduler();
     init::UnsetGlobals();
     node.mempool.reset();
     node.chainman.reset();
     node.scheduler.reset();
 
     try {
         if (!fs::remove(GetPidFile(*node.args))) {
             LogPrintf("%s: Unable to remove PID file: File does not exist\n",
                       __func__);
         }
     } catch (const fs::filesystem_error &e) {
         LogPrintf("%s: Unable to remove PID file: %s\n", __func__,
                   fsbridge::get_filesystem_error_message(e));
     }
 
     LogPrintf("%s: done\n", __func__);
 }
 
 /**
  * Signal handlers are very limited in what they are allowed to do.
  * The execution context the handler is invoked in is not guaranteed,
  * so we restrict handler operations to just touching variables:
  */
 #ifndef WIN32
 static void HandleSIGTERM(int) {
     StartShutdown();
 }
 
 static void HandleSIGHUP(int) {
     LogInstance().m_reopen_file = true;
 }
 #else
 static BOOL WINAPI consoleCtrlHandler(DWORD dwCtrlType) {
     StartShutdown();
     Sleep(INFINITE);
     return true;
 }
 #endif
 
 #ifndef WIN32
 static void registerSignalHandler(int signal, void (*handler)(int)) {
     struct sigaction sa;
     sa.sa_handler = handler;
     sigemptyset(&sa.sa_mask);
     sa.sa_flags = 0;
     sigaction(signal, &sa, NULL);
 }
 #endif
 
 static boost::signals2::connection rpc_notify_block_change_connection;
 static void OnRPCStarted() {
     rpc_notify_block_change_connection = uiInterface.NotifyBlockTip_connect(
         std::bind(RPCNotifyBlockChange, std::placeholders::_2));
 }
 
 static void OnRPCStopped() {
     rpc_notify_block_change_connection.disconnect();
     RPCNotifyBlockChange(nullptr);
     g_best_block_cv.notify_all();
     LogPrint(BCLog::RPC, "RPC stopped.\n");
 }
 
 void SetupServerArgs(NodeContext &node) {
     assert(!node.args);
     node.args = &gArgs;
     ArgsManager &argsman = *node.args;
 
     SetupHelpOptions(argsman);
     SetupCurrencyUnitOptions(argsman);
     // server-only for now
     argsman.AddArg("-help-debug",
                    "Print help message with debugging options and exit", false,
                    OptionsCategory::DEBUG_TEST);
 
     init::AddLoggingArgs(argsman);
 
     const auto defaultBaseParams =
         CreateBaseChainParams(CBaseChainParams::MAIN);
     const auto testnetBaseParams =
         CreateBaseChainParams(CBaseChainParams::TESTNET);
     const auto regtestBaseParams =
         CreateBaseChainParams(CBaseChainParams::REGTEST);
     const auto defaultChainParams =
         CreateChainParams(argsman, CBaseChainParams::MAIN);
     const auto testnetChainParams =
         CreateChainParams(argsman, CBaseChainParams::TESTNET);
     const auto regtestChainParams =
         CreateChainParams(argsman, CBaseChainParams::REGTEST);
 
     // Hidden Options
     std::vector<std::string> hidden_args = {
         "-dbcrashratio",
         "-forcecompactdb",
         "-maxaddrtosend",
         "-parkdeepreorg",
         "-automaticunparking",
         "-replayprotectionactivationtime",
         "-enableminerfund",
         "-chronikallowpause",
         "-chronikcors",
         // GUI args. These will be overwritten by SetupUIArgs for the GUI
         "-allowselfsignedrootcertificates",
         "-choosedatadir",
         "-lang=<lang>",
         "-min",
         "-resetguisettings",
         "-rootcertificates=<file>",
         "-splash",
         "-uiplatform",
         // TODO remove after the Nov. 2024 upgrade
         "-augustoactivationtime",
     };
 
    // Set all of the args and their help
    // When adding new options to the categories, please keep alphabetical
    // ordering. Do not translate _(...) in -help-debug options: they contain
    // many technical terms and reach only a very small audience, so
    // translating them is unnecessary stress for translators.
     argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY,
                    OptionsCategory::OPTIONS);
 #if defined(HAVE_SYSTEM)
     argsman.AddArg(
         "-alertnotify=<cmd>",
         "Execute command when a relevant alert is received or we see "
         "a really long fork (%s in cmd is replaced by message)",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #endif
     argsman.AddArg(
         "-assumevalid=<hex>",
         strprintf(
             "If this block is in the chain assume that it and its ancestors "
             "are valid and potentially skip their script verification (0 to "
             "verify all, default: %s, testnet: %s)",
             defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(),
             testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-blocksdir=<dir>",
                    "Specify directory to hold blocks subdirectory for *.dat "
                    "files (default: <datadir>)",
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-fastprune",
                    "Use smaller block files and lower minimum prune height for "
                    "testing purposes",
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
 #if defined(HAVE_SYSTEM)
     argsman.AddArg("-blocknotify=<cmd>",
                    "Execute command when the best block changes (%s in cmd is "
                    "replaced by block hash)",
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #endif
     argsman.AddArg("-blockreconstructionextratxn=<n>",
                    strprintf("Extra transactions to keep in memory for compact "
                              "block reconstructions (default: %u)",
                              DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN),
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-blocksonly",
         strprintf("Whether to reject transactions from network peers.  "
                   "Automatic broadcast and rebroadcast of any transactions "
                   "from inbound peers is disabled, unless the peer has the "
                   "'forcerelay' permission. RPC transactions are"
                   " not affected. (default: %u)",
                   DEFAULT_BLOCKSONLY),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-coinstatsindex",
                    strprintf("Maintain coinstats index used by the "
                              "gettxoutsetinfo RPC (default: %u)",
                              DEFAULT_COINSTATSINDEX),
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-conf=<file>",
         strprintf("Specify path to read-only configuration file. Relative "
                   "paths will be prefixed by datadir location. (default: %s)",
                   BITCOIN_CONF_FILENAME),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-datadir=<dir>", "Specify data directory",
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-dbbatchsize",
         strprintf("Maximum database write batch size in bytes (default: %u)",
                   DEFAULT_DB_BATCH_SIZE),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-dbcache=<n>",
         strprintf("Set database cache size in MiB (%d to %d, default: %d)",
                   MIN_DB_CACHE_MB, MAX_DB_CACHE_MB, DEFAULT_DB_CACHE_MB),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-includeconf=<file>",
         "Specify additional configuration file, relative to the -datadir path "
         "(only useable from configuration file, not command line)",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-loadblock=<file>",
                    "Imports blocks from external file on startup",
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-maxmempool=<n>",
                    strprintf("Keep the transaction memory pool below <n> "
                              "megabytes (default: %u)",
                              DEFAULT_MAX_MEMPOOL_SIZE_MB),
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-maxorphantx=<n>",
                    strprintf("Keep at most <n> unconnectable transactions in "
                              "memory (default: %u)",
                              DEFAULT_MAX_ORPHAN_TRANSACTIONS),
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-mempoolexpiry=<n>",
                    strprintf("Do not keep transactions in the mempool longer "
                              "than <n> hours (default: %u)",
                              DEFAULT_MEMPOOL_EXPIRY_HOURS),
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-minimumchainwork=<hex>",
         strprintf(
             "Minimum work assumed to exist on a valid chain in hex "
             "(default: %s, testnet: %s)",
             defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(),
             testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-par=<n>",
         strprintf("Set the number of script verification threads (%u to %d, 0 "
                   "= auto, <0 = leave that many cores free, default: %d)",
                   -GetNumCores(), MAX_SCRIPTCHECK_THREADS,
                   DEFAULT_SCRIPTCHECK_THREADS),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
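      // For example: with 8 cores, -par=-1 leaves one core free (7 script
      // verification threads, capped at MAX_SCRIPTCHECK_THREADS), while
      // -par=0 picks the thread count automatically, per the "<0 = leave
      // that many cores free" semantics above.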
     argsman.AddArg("-persistmempool",
                    strprintf("Whether to save the mempool on shutdown and load "
                              "on restart (default: %u)",
                              DEFAULT_PERSIST_MEMPOOL),
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-pid=<file>",
         strprintf("Specify pid file. Relative paths will be prefixed "
                   "by a net-specific datadir location. (default: %s)",
                   BITCOIN_PID_FILENAME),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-prune=<n>",
         strprintf("Reduce storage requirements by enabling pruning (deleting) "
                   "of old blocks. This allows the pruneblockchain RPC to be "
                   "called to delete specific blocks, and enables automatic "
                   "pruning of old blocks if a target size in MiB is provided. "
                   "This mode is incompatible with -txindex, -coinstatsindex "
                   "and -rescan. Warning: Reverting this setting requires "
                   "re-downloading the entire blockchain. (default: 0 = disable "
                   "pruning blocks, 1 = allow manual pruning via RPC, >=%u = "
                   "automatically prune block files to stay under the specified "
                   "target size in MiB)",
                   MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
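      // For example: -prune=1 only permits manual pruning through the
      // pruneblockchain RPC, while any value at or above the minimum printed
      // in the help text is treated as an automatic disk target in MiB.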
     argsman.AddArg(
         "-reindex-chainstate",
         "Rebuild chain state from the currently indexed blocks. When "
         "in pruning mode or if blocks on disk might be corrupted, use "
         "full -reindex instead.",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-reindex",
         "Rebuild chain state and block index from the blk*.dat files on disk",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg(
         "-settings=<file>",
         strprintf(
             "Specify path to dynamic settings data file. Can be disabled with "
             "-nosettings. File is written at runtime and not meant to be "
             "edited by users (use %s instead for custom settings). Relative "
             "paths will be prefixed by datadir location. (default: %s)",
             BITCOIN_CONF_FILENAME, BITCOIN_SETTINGS_FILENAME),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #if HAVE_SYSTEM
     argsman.AddArg("-startupnotify=<cmd>", "Execute command on startup.",
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #endif
 #ifndef WIN32
     argsman.AddArg(
         "-sysperms",
         "Create new files with system default permissions, instead of umask "
         "077 (only effective with disabled wallet functionality)",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #else
     hidden_args.emplace_back("-sysperms");
 #endif
     argsman.AddArg("-txindex",
                    strprintf("Maintain a full transaction index, used by the "
                              "getrawtransaction rpc call (default: %d)",
                              DEFAULT_TXINDEX),
                    ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #if ENABLE_CHRONIK
     argsman.AddArg(
         "-chronik",
         strprintf("Enable the Chronik indexer, which can be read via a "
                   "dedicated HTTP/Protobuf interface (default: %d)",
                   DEFAULT_CHRONIK),
         ArgsManager::ALLOW_BOOL, OptionsCategory::CHRONIK);
     argsman.AddArg(
         "-chronikbind=<addr>[:port]",
         strprintf(
             "Bind the Chronik indexer to the given address to listen for "
             "HTTP/Protobuf connections to access the index. Unlike the "
             "JSON-RPC, it's ok to have this publicly exposed on the internet. "
             "This option can be specified multiple times (default: %s; default "
             "port: %u, testnet: %u, regtest: %u)",
             Join(chronik::DEFAULT_BINDS, ", "),
             defaultBaseParams->ChronikPort(), testnetBaseParams->ChronikPort(),
             regtestBaseParams->ChronikPort()),
         ArgsManager::ALLOW_STRING | ArgsManager::NETWORK_ONLY,
         OptionsCategory::CHRONIK);
     argsman.AddArg("-chroniktokenindex",
                    "Enable token indexing in Chronik (default: 1)",
                    ArgsManager::ALLOW_BOOL, OptionsCategory::CHRONIK);
     argsman.AddArg("-chroniklokadidindex",
                    "Enable LOKAD ID indexing in Chronik (default: 1)",
                    ArgsManager::ALLOW_BOOL, OptionsCategory::CHRONIK);
     argsman.AddArg("-chronikreindex",
                    "Reindex the Chronik indexer from genesis, but leave the "
                    "other indexes untouched",
                    ArgsManager::ALLOW_BOOL, OptionsCategory::CHRONIK);
     argsman.AddArg(
         "-chroniktxnumcachebuckets",
         strprintf(
             "Tuning param of the TxNumCache, specifies how many buckets "
             "to use on the belt. Caution against setting this too high, "
             "it may slow down indexing. Set to 0 to disable. (default: %d)",
             chronik::DEFAULT_TX_NUM_CACHE_BUCKETS),
         ArgsManager::ALLOW_INT, OptionsCategory::CHRONIK);
     argsman.AddArg(
         "-chroniktxnumcachebucketsize",
         strprintf(
             "Tuning param of the TxNumCache, specifies the size of each bucket "
             "on the belt. Unlike the number of buckets, this may be increased "
             "without much danger of slowing the indexer down. The total cache "
             "size will be `num_buckets * bucket_size * 40B`, so by default the "
             "cache will require %dkB of memory. (default: %d)",
             chronik::DEFAULT_TX_NUM_CACHE_BUCKETS *
                 chronik::DEFAULT_TX_NUM_CACHE_BUCKET_SIZE * 40 / 1000,
             chronik::DEFAULT_TX_NUM_CACHE_BUCKET_SIZE),
         ArgsManager::ALLOW_INT, OptionsCategory::CHRONIK);
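      // Worked example for the formula above (illustrative values): 10
      // buckets of 100000 entries each would need
      // 10 * 100000 * 40B = 40MB for the TxNumCache.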
     argsman.AddArg("-chronikperfstats",
                    "Output some performance statistics (e.g. num cache hits, "
                    "seconds spent) into a <datadir>/perf folder. (default: 0)",
                    ArgsManager::ALLOW_BOOL, OptionsCategory::CHRONIK);
 #endif
     argsman.AddArg(
         "-blockfilterindex=<type>",
         strprintf("Maintain an index of compact filters by block "
                   "(default: %s, values: %s).",
                   DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) +
             " If <type> is not supplied or if <type> = 1, indexes for "
             "all known types are enabled.",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
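      // For example: -blockfilterindex=basic enables only the basic filter
      // index; passing the option with no value, or as -blockfilterindex=1,
      // enables indexes for all known filter types (see the parsing in
      // AppInitParameterInteraction below).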
     argsman.AddArg(
         "-usecashaddr",
         "Use Cash Address for destination encoding instead of base58 "
         "(activate by default on Jan, 14)",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 
     argsman.AddArg(
         "-addnode=<ip>",
         "Add a node to connect to and attempt to keep the connection "
         "open (see the `addnode` RPC command help for more info)",
         ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
         OptionsCategory::CONNECTION);
     argsman.AddArg("-asmap=<file>",
                    strprintf("Specify asn mapping used for bucketing of the "
                              "peers (default: %s). Relative paths will be "
                              "prefixed by the net-specific datadir location.",
                              DEFAULT_ASMAP_FILENAME),
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg("-bantime=<n>",
                    strprintf("Default duration (in seconds) of manually "
                              "configured bans (default: %u)",
                              DEFAULT_MISBEHAVING_BANTIME),
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-bind=<addr>[:<port>][=onion]",
         strprintf("Bind to given address and always listen on it (default: "
                   "0.0.0.0). Use [host]:port notation for IPv6. Append =onion "
                   "to tag any incoming connections to that address and port as "
                   "incoming Tor connections (default: 127.0.0.1:%u=onion, "
                   "testnet: 127.0.0.1:%u=onion, regtest: 127.0.0.1:%u=onion)",
                   defaultBaseParams->OnionServiceTargetPort(),
                   testnetBaseParams->OnionServiceTargetPort(),
                   regtestBaseParams->OnionServiceTargetPort()),
         ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
         OptionsCategory::CONNECTION);
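      // For example (hypothetical addresses): -bind=0.0.0.0:8333 listens on
      // all interfaces, while -bind=127.0.0.1:8334=onion additionally tags
      // every connection arriving on that address/port as an incoming Tor
      // connection.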
     argsman.AddArg(
         "-connect=<ip>",
         "Connect only to the specified node(s); -connect=0 disables automatic "
         "connections (the rules for this peer are the same as for -addnode)",
         ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
         OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-discover",
         "Discover own IP addresses (default: 1 when listening and no "
         "-externalip or -proxy)",
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg("-dns",
                    strprintf("Allow DNS lookups for -addnode, -seednode and "
                              "-connect (default: %d)",
                              DEFAULT_NAME_LOOKUP),
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-dnsseed",
         strprintf(
             "Query for peer addresses via DNS lookup, if low on addresses "
             "(default: %u unless -connect used)",
             DEFAULT_DNSSEED),
         ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
     argsman.AddArg("-externalip=<ip>", "Specify your own public address",
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-fixedseeds",
         strprintf(
             "Allow fixed seeds if DNS seeds don't provide peers (default: %u)",
             DEFAULT_FIXEDSEEDS),
         ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-forcednsseed",
         strprintf(
             "Always query for peer addresses via DNS lookup (default: %d)",
             DEFAULT_FORCEDNSSEED),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg("-overridednsseed",
                    "If set, only use the specified DNS seed when "
                    "querying for peer addresses via DNS lookup.",
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-listen",
         "Accept connections from outside (default: 1 if no -proxy or -connect)",
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-listenonion",
         strprintf("Automatically create Tor onion service (default: %d)",
                   DEFAULT_LISTEN_ONION),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-maxconnections=<n>",
         strprintf("Maintain at most <n> connections to peers. The effective "
                   "limit depends on system limitations and might be lower than "
                   "the specified value (default: %u)",
                   DEFAULT_MAX_PEER_CONNECTIONS),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg("-maxreceivebuffer=<n>",
                    strprintf("Maximum per-connection receive buffer, <n>*1000 "
                              "bytes (default: %u)",
                              DEFAULT_MAXRECEIVEBUFFER),
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-maxsendbuffer=<n>",
         strprintf(
             "Maximum per-connection send buffer, <n>*1000 bytes (default: %u)",
             DEFAULT_MAXSENDBUFFER),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-maxtimeadjustment",
         strprintf("Maximum allowed median peer time offset adjustment. Local "
                   "perspective of time may be influenced by peers forward or "
                   "backward by this amount. (default: %u seconds)",
                   DEFAULT_MAX_TIME_ADJUSTMENT),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg("-onion=<ip:port>",
                    strprintf("Use separate SOCKS5 proxy to reach peers via Tor "
                              "onion services (default: %s)",
                              "-proxy"),
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg("-i2psam=<ip:port>",
                    "I2P SAM proxy to reach I2P peers and accept I2P "
                    "connections (default: none)",
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-i2pacceptincoming",
         "If set and -i2psam is also set then incoming I2P connections are "
         "accepted via the SAM proxy. If this is not set but -i2psam is set "
         "then only outgoing connections will be made to the I2P network. "
         "Ignored if -i2psam is not set. Listening for incoming I2P connections "
         "is done through the SAM proxy, not by binding to a local address and "
         "port (default: 1)",
         ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
 
     argsman.AddArg(
         "-onlynet=<net>",
         "Make outgoing connections only through network <net> (" +
             Join(GetNetworkNames(), ", ") +
             "). Incoming connections are not affected by this option. This "
             "option can be specified multiple times to allow multiple "
             "networks. Warning: if it is used with non-onion networks "
             "and the -onion or -proxy option is set, then outbound onion "
             "connections will still be made; use -noonion or -onion=0 to "
             "disable outbound onion connections in this case",
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
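      // For example: -onlynet=onion limits outgoing connections to Tor.
      // Note, per the warning above, that -onlynet=ipv4 alone does not stop
      // outbound onion connections when -onion or -proxy is set; -noonion
      // or -onion=0 is needed for that.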
     argsman.AddArg("-peerbloomfilters",
                    strprintf("Support filtering of blocks and transaction with "
                              "bloom filters (default: %d)",
                              DEFAULT_PEERBLOOMFILTERS),
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-peerblockfilters",
         strprintf(
             "Serve compact block filters to peers per BIP 157 (default: %u)",
             DEFAULT_PEERBLOCKFILTERS),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg("-permitbaremultisig",
                    strprintf("Relay non-P2SH multisig (default: %d)",
                              DEFAULT_PERMIT_BAREMULTISIG),
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     // TODO: remove the sentence "Nodes not using ... incoming connections."
     // once the changes from https://github.com/bitcoin/bitcoin/pull/23542 have
     // become widespread.
     argsman.AddArg("-port=<port>",
                    strprintf("Listen for connections on <port>. Nodes not "
                              "using the default ports (default: %u, "
                              "testnet: %u, regtest: %u) are unlikely to get "
                              "incoming connections.  Not relevant for I2P (see "
                              "doc/i2p.md).",
                              defaultChainParams->GetDefaultPort(),
                              testnetChainParams->GetDefaultPort(),
                              regtestChainParams->GetDefaultPort()),
                    ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                    OptionsCategory::CONNECTION);
     argsman.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy",
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-proxyrandomize",
         strprintf("Randomize credentials for every proxy connection. "
                   "This enables Tor stream isolation (default: %d)",
                   DEFAULT_PROXYRANDOMIZE),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-seednode=<ip>",
         "Connect to a node to retrieve peer addresses, and disconnect",
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-networkactive",
         "Enable all P2P network activity (default: 1). Can be changed "
         "by the setnetworkactive RPC command",
         ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
     argsman.AddArg("-timeout=<n>",
                    strprintf("Specify connection timeout in milliseconds "
                              "(minimum: 1, default: %d)",
                              DEFAULT_CONNECT_TIMEOUT),
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-peertimeout=<n>",
         strprintf("Specify p2p connection timeout in seconds. This option "
                   "determines the amount of time a peer may be inactive before "
                   "the connection to it is dropped. (minimum: 1, default: %d)",
                   DEFAULT_PEER_CONNECT_TIMEOUT),
          ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-torcontrol=<ip>:<port>",
         strprintf(
             "Tor control port to use if onion listening enabled (default: %s)",
             DEFAULT_TOR_CONTROL),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg("-torpassword=<pass>",
                    "Tor control port password (default: empty)",
                    ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                    OptionsCategory::CONNECTION);
 #ifdef USE_UPNP
 #if USE_UPNP
     argsman.AddArg("-upnp",
                    "Use UPnP to map the listening port (default: 1 when "
                    "listening and no -proxy)",
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 #else
     argsman.AddArg(
         "-upnp",
         strprintf("Use UPnP to map the listening port (default: %u)", 0),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 #endif
 #else
     hidden_args.emplace_back("-upnp");
 #endif
 #ifdef USE_NATPMP
     argsman.AddArg(
         "-natpmp",
         strprintf("Use NAT-PMP to map the listening port (default: %s)",
                   DEFAULT_NATPMP ? "1 when listening and no -proxy" : "0"),
         ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION);
 #else
     hidden_args.emplace_back("-natpmp");
 #endif // USE_NATPMP
     argsman.AddArg(
         "-whitebind=<[permissions@]addr>",
         "Bind to the given address and add permission flags to the peers "
         "connecting to it."
         "Use [host]:port notation for IPv6. Allowed permissions: " +
             Join(NET_PERMISSIONS_DOC, ", ") +
             ". "
             "Specify multiple permissions separated by commas (default: "
             "download,noban,mempool,relay). Can be specified multiple times.",
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
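      // For example (hypothetical address):
      //   -whitebind=noban,relay@127.0.0.1:8334
      // listens on 127.0.0.1:8334 and grants the 'noban' and 'relay'
      // permissions to peers connecting to it; without a permissions prefix,
      // the defaults listed above apply.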
 
     argsman.AddArg("-whitelist=<[permissions@]IP address or network>",
                    "Add permission flags to the peers connecting from the "
                    "given IP address (e.g. 1.2.3.4) or CIDR-notated network "
                    "(e.g. 1.2.3.0/24). "
                    "Uses the same permissions as -whitebind. Can be specified "
                    "multiple times.",
                    ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     argsman.AddArg(
         "-maxuploadtarget=<n>",
         strprintf("Tries to keep outbound traffic under the given target (in "
                   "MiB per 24h). Limit does not apply to peers with 'download' "
                   "permission. 0 = no limit (default: %d)",
                   DEFAULT_MAX_UPLOAD_TARGET),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 
     g_wallet_init_interface.AddWalletOptions(argsman);
 
 #if ENABLE_ZMQ
     argsman.AddArg("-zmqpubhashblock=<address>",
                    "Enable publish hash block in <address>",
                    ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     argsman.AddArg("-zmqpubhashtx=<address>",
                    "Enable publish hash transaction in <address>",
                    ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     argsman.AddArg("-zmqpubrawblock=<address>",
                    "Enable publish raw block in <address>",
                    ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     argsman.AddArg("-zmqpubrawtx=<address>",
                    "Enable publish raw transaction in <address>",
                    ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     argsman.AddArg("-zmqpubsequence=<address>",
                    "Enable publish hash block and tx sequence in <address>",
                    ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     argsman.AddArg(
         "-zmqpubhashblockhwm=<n>",
         strprintf("Set publish hash block outbound message high water "
                   "mark (default: %d)",
                   CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM),
         ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     argsman.AddArg(
         "-zmqpubhashtxhwm=<n>",
         strprintf("Set publish hash transaction outbound message high "
                   "water mark (default: %d)",
                   CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM),
          ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     argsman.AddArg(
         "-zmqpubrawblockhwm=<n>",
         strprintf("Set publish raw block outbound message high water "
                   "mark (default: %d)",
                   CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM),
         ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     argsman.AddArg(
         "-zmqpubrawtxhwm=<n>",
         strprintf("Set publish raw transaction outbound message high "
                   "water mark (default: %d)",
                   CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM),
         ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     argsman.AddArg("-zmqpubsequencehwm=<n>",
                    strprintf("Set publish hash sequence message high water mark"
                              " (default: %d)",
                              CZMQAbstractNotifier::DEFAULT_ZMQ_SNDHWM),
                    ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
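      // For example (hypothetical endpoint): -zmqpubrawtx=tcp://127.0.0.1:28332
      // publishes each raw transaction on a local ZMQ socket, and the
      // corresponding -zmqpubrawtxhwm value bounds that socket's outbound
      // message queue.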
 #else
     hidden_args.emplace_back("-zmqpubhashblock=<address>");
     hidden_args.emplace_back("-zmqpubhashtx=<address>");
     hidden_args.emplace_back("-zmqpubrawblock=<address>");
     hidden_args.emplace_back("-zmqpubrawtx=<address>");
     hidden_args.emplace_back("-zmqpubsequence=<n>");
     hidden_args.emplace_back("-zmqpubhashblockhwm=<n>");
     hidden_args.emplace_back("-zmqpubhashtxhwm=<n>");
     hidden_args.emplace_back("-zmqpubrawblockhwm=<n>");
     hidden_args.emplace_back("-zmqpubrawtxhwm=<n>");
     hidden_args.emplace_back("-zmqpubsequencehwm=<n>");
 #endif
 
     argsman.AddArg(
         "-checkblocks=<n>",
         strprintf("How many blocks to check at startup (default: %u, 0 = all)",
                   DEFAULT_CHECKBLOCKS),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-checklevel=<n>",
                    strprintf("How thorough the block verification of "
                              "-checkblocks is: %s (0-4, default: %u)",
                              Join(CHECKLEVEL_DOC, ", "), DEFAULT_CHECKLEVEL),
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-checkblockindex",
                    strprintf("Do a consistency check for the block tree, "
                              "chainstate, and other validation data structures "
                              "occasionally. (default: %u, regtest: %u)",
                              defaultChainParams->DefaultConsistencyChecks(),
                              regtestChainParams->DefaultConsistencyChecks()),
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-checkaddrman=<n>",
                    strprintf("Run addrman consistency checks every <n> "
                              "operations. Use 0 to disable. (default: %u)",
                              DEFAULT_ADDRMAN_CONSISTENCY_CHECKS),
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
     argsman.AddArg(
         "-checkmempool=<n>",
         strprintf("Run mempool consistency checks every <n> transactions. Use "
                   "0 to disable. (default: %u, regtest: %u)",
                   defaultChainParams->DefaultConsistencyChecks(),
                   regtestChainParams->DefaultConsistencyChecks()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-checkpoints",
                    strprintf("Only accept block chain matching built-in "
                              "checkpoints (default: %d)",
                              DEFAULT_CHECKPOINTS_ENABLED),
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-deprecatedrpc=<method>",
                    "Allows deprecated RPC method(s) to be used",
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
     argsman.AddArg(
         "-stopafterblockimport",
         strprintf("Stop running after importing blocks from disk (default: %d)",
                   DEFAULT_STOPAFTERBLOCKIMPORT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-stopatheight",
                    strprintf("Stop running after reaching the given height in "
                              "the main chain (default: %u)",
                              DEFAULT_STOPATHEIGHT),
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-addrmantest", "Allows to test address relay on localhost",
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-capturemessages", "Capture all P2P messages to disk",
                    ArgsManager::ALLOW_BOOL | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-mocktime=<n>",
                    "Replace actual time with " + UNIX_EPOCH_TIME +
                        " (default: 0)",
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
     argsman.AddArg(
         "-maxsigcachesize=<n>",
         strprintf("Limit size of signature cache to <n> MiB (default: %u)",
                   DEFAULT_MAX_SIG_CACHE_BYTES >> 20),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     argsman.AddArg(
         "-maxscriptcachesize=<n>",
         strprintf("Limit size of script cache to <n> MiB (default: %u)",
                   DEFAULT_MAX_SCRIPT_CACHE_BYTES >> 20),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-maxtipage=<n>",
                    strprintf("Maximum tip age in seconds to consider node in "
                              "initial block download (default: %u)",
                              Ticks<std::chrono::seconds>(DEFAULT_MAX_TIP_AGE)),
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::DEBUG_TEST);
 
     argsman.AddArg("-uacomment=<cmt>",
                    "Append comment to the user agent string",
                    ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-uaclientname=<clientname>", "Set user agent client name",
                    ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     argsman.AddArg("-uaclientversion=<clientversion>",
                    "Set user agent client version", ArgsManager::ALLOW_ANY,
                    OptionsCategory::DEBUG_TEST);
 
     SetupChainParamsBaseOptions(argsman);
 
     argsman.AddArg(
         "-acceptnonstdtxn",
         strprintf(
             "Relay and mine \"non-standard\" transactions (%sdefault: %u)",
             "testnet/regtest only; ", defaultChainParams->RequireStandard()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::NODE_RELAY);
     argsman.AddArg("-excessiveblocksize=<n>",
                    strprintf("Do not accept blocks larger than this limit, in "
                              "bytes (default: %d)",
                              DEFAULT_MAX_BLOCK_SIZE),
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::NODE_RELAY);
     const auto &ticker = Currency::get().ticker;
     argsman.AddArg(
         "-dustrelayfee=<amt>",
         strprintf("Fee rate (in %s/kB) used to define dust, the value of an "
                   "output such that it will cost about 1/3 of its value in "
                   "fees at this fee rate to spend it. (default: %s)",
                   ticker, FormatMoney(DUST_RELAY_TX_FEE)),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::NODE_RELAY);
 
     argsman.AddArg(
         "-bytespersigcheck",
         strprintf("Equivalent bytes per sigCheck in transactions for relay and "
                   "mining (default: %u).",
                   DEFAULT_BYTES_PER_SIGCHECK),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg(
         "-bytespersigop",
         strprintf("DEPRECATED: Equivalent bytes per sigCheck in transactions "
                   "for relay and mining (default: %u). This has been "
                   "deprecated since v0.26.8 and will be removed in the future, "
                   "please use -bytespersigcheck instead.",
                   DEFAULT_BYTES_PER_SIGCHECK),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg(
         "-datacarrier",
         strprintf("Relay and mine data carrier transactions (default: %d)",
                   DEFAULT_ACCEPT_DATACARRIER),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg(
         "-datacarriersize",
         strprintf("Maximum size of data in data carrier transactions "
                   "we relay and mine (default: %u)",
                   MAX_OP_RETURN_RELAY),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg(
         "-minrelaytxfee=<amt>",
         strprintf("Fees (in %s/kB) smaller than this are rejected for "
                   "relaying, mining and transaction creation (default: %s)",
                   ticker, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE_PER_KB)),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg(
         "-whitelistrelay",
         strprintf("Add 'relay' permission to whitelisted inbound peers "
                   "with default permissions. This will accept relayed "
                   "transactions even when not relaying transactions "
                   "(default: %d)",
                   DEFAULT_WHITELISTRELAY),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     argsman.AddArg(
         "-whitelistforcerelay",
         strprintf("Add 'forcerelay' permission to whitelisted inbound peers"
                   " with default permissions. This will relay transactions "
                   "even if the transactions were already in the mempool "
                   "(default: %d)",
                   DEFAULT_WHITELISTFORCERELAY),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
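      // Note: InitParameterInteraction() below soft-sets -whitelistrelay=1
      // whenever -whitelistforcerelay is enabled, since forcing relay from
      // whitelisted hosts implies accepting their relayed transactions in
      // the first place.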
 
     argsman.AddArg("-blockmaxsize=<n>",
                    strprintf("Set maximum block size in bytes (default: %d)",
                              DEFAULT_MAX_GENERATED_BLOCK_SIZE),
                    ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
     argsman.AddArg(
         "-blockmintxfee=<amt>",
         strprintf("Set lowest fee rate (in %s/kB) for transactions to "
                   "be included in block creation. (default: %s)",
                   ticker, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE_PER_KB)),
         ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
 
     argsman.AddArg("-blockversion=<n>",
                    "Override block version to test forking scenarios",
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::BLOCK_CREATION);
 
     argsman.AddArg("-server", "Accept command line and JSON-RPC commands",
                    ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     argsman.AddArg("-rest",
                    strprintf("Accept public REST requests (default: %d)",
                              DEFAULT_REST_ENABLE),
                    ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     argsman.AddArg(
         "-rpcbind=<addr>[:port]",
         "Bind to given address to listen for JSON-RPC connections. Do not "
         "expose the RPC server to untrusted networks such as the public "
         "internet! This option is ignored unless -rpcallowip is also passed. "
         "Port is optional and overrides -rpcport.  Use [host]:port notation "
         "for IPv6. This option can be specified multiple times (default: "
         "127.0.0.1 and ::1 i.e., localhost)",
         ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY |
             ArgsManager::SENSITIVE,
         OptionsCategory::RPC);
     argsman.AddArg(
         "-rpcdoccheck",
         strprintf("Throw a non-fatal error at runtime if the documentation for "
                   "an RPC is incorrect (default: %u)",
                   DEFAULT_RPC_DOC_CHECK),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
     argsman.AddArg(
         "-rpccookiefile=<loc>",
         "Location of the auth cookie. Relative paths will be prefixed "
         "by a net-specific datadir location. (default: data dir)",
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     argsman.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections",
                    ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                    OptionsCategory::RPC);
     argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections",
                    ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                    OptionsCategory::RPC);
     argsman.AddArg(
         "-rpcwhitelist=<whitelist>",
         "Set a whitelist to filter incoming RPC calls for a specific user. The "
         "field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc "
         "2>,...,<rpc n>. If multiple whitelists are set for a given user, they "
         "are set-intersected. See -rpcwhitelistdefault documentation for "
         "information on default whitelist behavior.",
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
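      // For example (hypothetical user):
      //   -rpcwhitelist=alice:getblockcount,getbestblockhash
      // limits the RPC user 'alice' to those two methods; specifying several
      // whitelists for the same user intersects them, as described above.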
     argsman.AddArg(
         "-rpcwhitelistdefault",
         "Sets default behavior for rpc whitelisting. Unless "
         "rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc "
         "server acts as if all rpc users are subject to "
         "empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault "
         "is set to 1 and no -rpcwhitelist is set, rpc server acts as if all "
         "rpc users are subject to empty whitelists.",
         ArgsManager::ALLOW_BOOL, OptionsCategory::RPC);
     argsman.AddArg(
         "-rpcauth=<userpw>",
         "Username and HMAC-SHA-256 hashed password for JSON-RPC connections. "
         "The field <userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A "
         "canonical python script is included in share/rpcauth. The client then "
         "connects normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> "
         "pair of arguments. This option can be specified multiple times",
         ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
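      // For example (hypothetical values): a generated line of the form
      //   rpcauth=alice:0123abcd$<64 hex chars>
      // carries the salt before the '$' and the HMAC-SHA-256 of the password
      // after it; the client still authenticates with rpcuser=alice and the
      // plain rpcpassword.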
     argsman.AddArg("-rpcport=<port>",
                    strprintf("Listen for JSON-RPC connections on <port> "
                              "(default: %u, testnet: %u, regtest: %u)",
                              defaultBaseParams->RPCPort(),
                              testnetBaseParams->RPCPort(),
                              regtestBaseParams->RPCPort()),
                    ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                    OptionsCategory::RPC);
     argsman.AddArg(
         "-rpcallowip=<ip>",
         "Allow JSON-RPC connections from specified source. Valid for "
         "<ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. "
         "1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). "
         "This option can be specified multiple times",
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     argsman.AddArg(
         "-rpcthreads=<n>",
         strprintf(
             "Set the number of threads to service RPC calls (default: %d)",
             DEFAULT_HTTP_THREADS),
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     argsman.AddArg(
         "-rpccorsdomain=value",
         "Domain from which to accept cross origin requests (browser enforced)",
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
 
     argsman.AddArg("-rpcworkqueue=<n>",
                    strprintf("Set the depth of the work queue to service RPC "
                              "calls (default: %d)",
                              DEFAULT_HTTP_WORKQUEUE),
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::RPC);
     argsman.AddArg("-rpcservertimeout=<n>",
                    strprintf("Timeout during HTTP requests (default: %d)",
                              DEFAULT_HTTP_SERVER_TIMEOUT),
                    ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                    OptionsCategory::RPC);
 
 #if HAVE_DECL_FORK
     argsman.AddArg("-daemon",
                    strprintf("Run in the background as a daemon and accept "
                              "commands (default: %d)",
                              DEFAULT_DAEMON),
                    ArgsManager::ALLOW_BOOL, OptionsCategory::OPTIONS);
     argsman.AddArg("-daemonwait",
                    strprintf("Wait for initialization to be finished before "
                              "exiting. This implies -daemon (default: %d)",
                              DEFAULT_DAEMONWAIT),
                    ArgsManager::ALLOW_BOOL, OptionsCategory::OPTIONS);
 #else
     hidden_args.emplace_back("-daemon");
     hidden_args.emplace_back("-daemonwait");
 #endif
 
     // Avalanche options.
     argsman.AddArg("-avalanche",
                    strprintf("Enable the avalanche feature (default: %u)",
                              AVALANCHE_DEFAULT_ENABLED),
                    ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-avalanchestakingrewards",
         strprintf("Enable the avalanche staking rewards feature (default: %u, "
                   "testnet: %u, regtest: %u)",
                   defaultChainParams->GetConsensus().enableStakingRewards,
                   testnetChainParams->GetConsensus().enableStakingRewards,
                   regtestChainParams->GetConsensus().enableStakingRewards),
         ArgsManager::ALLOW_BOOL, OptionsCategory::AVALANCHE);
     argsman.AddArg("-avalancheconflictingproofcooldown",
                    strprintf("Mandatory cooldown before a proof conflicting "
                              "with an already registered one can be considered "
                              "in seconds (default: %u)",
                              AVALANCHE_DEFAULT_CONFLICTING_PROOF_COOLDOWN),
                    ArgsManager::ALLOW_INT, OptionsCategory::AVALANCHE);
     argsman.AddArg("-avalanchepeerreplacementcooldown",
                    strprintf("Mandatory cooldown before a peer can be replaced "
                              "in seconds (default: %u)",
                              AVALANCHE_DEFAULT_PEER_REPLACEMENT_COOLDOWN),
                    ArgsManager::ALLOW_INT, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-avaminquorumstake",
         strprintf(
             "Minimum amount of known stake for a usable quorum (default: %s)",
             FormatMoney(AVALANCHE_DEFAULT_MIN_QUORUM_STAKE)),
         ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-avaminquorumconnectedstakeratio",
         strprintf("Minimum proportion of known stake we"
                   " need nodes for to have a usable quorum (default: %s)",
                   AVALANCHE_DEFAULT_MIN_QUORUM_CONNECTED_STAKE_RATIO),
         ArgsManager::ALLOW_STRING, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-avaminavaproofsnodecount",
         strprintf("Minimum number of node that needs to send us an avaproofs"
                   " message before we consider we have a usable quorum"
                   " (default: %s)",
                   AVALANCHE_DEFAULT_MIN_AVAPROOFS_NODE_COUNT),
         ArgsManager::ALLOW_INT, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-avastalevotethreshold",
         strprintf("Number of avalanche votes before a voted item goes stale "
                   "when voting confidence is low (default: %u)",
                   AVALANCHE_VOTE_STALE_THRESHOLD),
         ArgsManager::ALLOW_INT, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-avastalevotefactor",
         strprintf(
             "Factor affecting the number of avalanche votes before a voted "
             "item goes stale when voting confidence is high (default: %u)",
             AVALANCHE_VOTE_STALE_FACTOR),
         ArgsManager::ALLOW_INT, OptionsCategory::AVALANCHE);
     argsman.AddArg("-avacooldown",
                    strprintf("Mandatory cooldown between two avapoll in "
                              "milliseconds (default: %u)",
                              AVALANCHE_DEFAULT_COOLDOWN),
                    ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-avatimeout",
         strprintf("Avalanche query timeout in milliseconds (default: %u)",
                   AVALANCHE_DEFAULT_QUERY_TIMEOUT.count()),
         ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-avadelegation",
         "Avalanche proof delegation to the master key used by this node "
         "(default: none). Should be used in conjunction with -avaproof and "
         "-avamasterkey",
         ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
     argsman.AddArg("-avaproof",
                    "Avalanche proof to be used by this node (default: none)",
                    ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-avaproofstakeutxoconfirmations",
         strprintf(
             "Minimum number of confirmations before a stake utxo is mature"
             " enough to be included into a proof. Utxos in the mempool are not "
             "accepted (i.e this value must be greater than 0) (default: %s)",
             AVALANCHE_DEFAULT_STAKE_UTXO_CONFIRMATIONS),
         ArgsManager::ALLOW_INT, OptionsCategory::HIDDEN);
     argsman.AddArg("-avaproofstakeutxodustthreshold",
                    strprintf("Minimum value each stake utxo must have to be "
                              "considered valid (default: %s)",
                              avalanche::PROOF_DUST_THRESHOLD),
                    ArgsManager::ALLOW_ANY, OptionsCategory::HIDDEN);
     argsman.AddArg("-avamasterkey",
                    "Master key associated with the proof. If a proof is "
                    "required, this is mandatory.",
                    ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                    OptionsCategory::AVALANCHE);
     argsman.AddArg("-avasessionkey", "Avalanche session key (default: random)",
                    ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                    OptionsCategory::HIDDEN);
     argsman.AddArg(
         "-maxavalancheoutbound",
         strprintf(
             "Set the maximum number of avalanche outbound peers to connect to. "
             "Note that this option takes precedence over the -maxconnections "
             "option (default: %u).",
             DEFAULT_MAX_AVALANCHE_OUTBOUND_CONNECTIONS),
         ArgsManager::ALLOW_INT, OptionsCategory::AVALANCHE);
     argsman.AddArg(
         "-persistavapeers",
         strprintf("Whether to save the avalanche peers upon shutdown and load "
                   "them upon startup (default: %u).",
                   DEFAULT_PERSIST_AVAPEERS),
         ArgsManager::ALLOW_BOOL, OptionsCategory::AVALANCHE);
 
     hidden_args.emplace_back("-avalanchepreconsensus");
 
     // Add the hidden options
     argsman.AddHiddenArgs(hidden_args);
 }
 
 static bool fHaveGenesis = false;
 static GlobalMutex g_genesis_wait_mutex;
 static std::condition_variable g_genesis_wait_cv;
 
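  /**
   * Wake any thread blocked on g_genesis_wait_cv once a block (in practice
   * the genesis block) has been connected. A minimal sketch of the waiting
   * side, assuming this callback is wired to the block-tip notification
   * elsewhere in init, could look like:
   *
   *   WAIT_LOCK(g_genesis_wait_mutex, lock);
   *   g_genesis_wait_cv.wait(lock, [] { return fHaveGenesis; });
   */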
 static void BlockNotifyGenesisWait(const CBlockIndex *pBlockIndex) {
     if (pBlockIndex != nullptr) {
         {
             LOCK(g_genesis_wait_mutex);
             fHaveGenesis = true;
         }
         g_genesis_wait_cv.notify_all();
     }
 }
 
 #if HAVE_SYSTEM
 static void StartupNotify(const ArgsManager &args) {
     std::string cmd = args.GetArg("-startupnotify", "");
     if (!cmd.empty()) {
         std::thread t(runCommand, cmd);
         // thread runs free
         t.detach();
     }
 }
 #endif
 
 static bool AppInitServers(Config &config,
                            HTTPRPCRequestProcessor &httpRPCRequestProcessor,
                            NodeContext &node) {
     const ArgsManager &args = *Assert(node.args);
     RPCServerSignals::OnStarted(&OnRPCStarted);
     RPCServerSignals::OnStopped(&OnRPCStopped);
     if (!InitHTTPServer(config)) {
         return false;
     }
 
     StartRPC();
     node.rpc_interruption_point = RpcInterruptionPoint;
 
     if (!StartHTTPRPC(httpRPCRequestProcessor)) {
         return false;
     }
     if (args.GetBoolArg("-rest", DEFAULT_REST_ENABLE)) {
         StartREST(&node);
     }
 
     StartHTTPServer();
     return true;
 }
 
 // Parameter interaction based on rules
 void InitParameterInteraction(ArgsManager &args) {
     // when specifying an explicit binding address, you want to listen on it
     // even when -connect or -proxy is specified.
     if (args.IsArgSet("-bind")) {
         if (args.SoftSetBoolArg("-listen", true)) {
             LogPrintf(
                 "%s: parameter interaction: -bind set -> setting -listen=1\n",
                 __func__);
         }
     }
     if (args.IsArgSet("-whitebind")) {
         if (args.SoftSetBoolArg("-listen", true)) {
             LogPrintf("%s: parameter interaction: -whitebind set -> setting "
                       "-listen=1\n",
                       __func__);
         }
     }
 
     if (args.IsArgSet("-connect")) {
         // when only connecting to trusted nodes, do not seed via DNS, or listen
         // by default.
         if (args.SoftSetBoolArg("-dnsseed", false)) {
             LogPrintf("%s: parameter interaction: -connect set -> setting "
                       "-dnsseed=0\n",
                       __func__);
         }
         if (args.SoftSetBoolArg("-listen", false)) {
             LogPrintf("%s: parameter interaction: -connect set -> setting "
                       "-listen=0\n",
                       __func__);
         }
     }
 
     if (args.IsArgSet("-proxy")) {
         // to protect privacy, do not listen by default if a default proxy
         // server is specified.
         if (args.SoftSetBoolArg("-listen", false)) {
             LogPrintf(
                 "%s: parameter interaction: -proxy set -> setting -listen=0\n",
                 __func__);
         }
         // to protect privacy, do not map ports when a proxy is set. The user
         // may still specify -listen=1 to listen locally, so don't rely on this
         // happening through -listen below.
         if (args.SoftSetBoolArg("-upnp", false)) {
             LogPrintf(
                 "%s: parameter interaction: -proxy set -> setting -upnp=0\n",
                 __func__);
         }
         if (args.SoftSetBoolArg("-natpmp", false)) {
             LogPrintf(
                 "%s: parameter interaction: -proxy set -> setting -natpmp=0\n",
                 __func__);
         }
         // to protect privacy, do not discover addresses by default
         if (args.SoftSetBoolArg("-discover", false)) {
             LogPrintf("%s: parameter interaction: -proxy set -> setting "
                       "-discover=0\n",
                       __func__);
         }
     }
 
     if (!args.GetBoolArg("-listen", DEFAULT_LISTEN)) {
         // do not map ports or try to retrieve public IP when not listening
         // (pointless)
         if (args.SoftSetBoolArg("-upnp", false)) {
             LogPrintf(
                 "%s: parameter interaction: -listen=0 -> setting -upnp=0\n",
                 __func__);
         }
         if (args.SoftSetBoolArg("-natpmp", false)) {
             LogPrintf(
                 "%s: parameter interaction: -listen=0 -> setting -natpmp=0\n",
                 __func__);
         }
         if (args.SoftSetBoolArg("-discover", false)) {
             LogPrintf(
                 "%s: parameter interaction: -listen=0 -> setting -discover=0\n",
                 __func__);
         }
         if (args.SoftSetBoolArg("-listenonion", false)) {
             LogPrintf("%s: parameter interaction: -listen=0 -> setting "
                       "-listenonion=0\n",
                       __func__);
         }
         if (args.SoftSetBoolArg("-i2pacceptincoming", false)) {
             LogPrintf("%s: parameter interaction: -listen=0 -> setting "
                       "-i2pacceptincoming=0\n",
                       __func__);
         }
     }
 
     if (args.IsArgSet("-externalip")) {
         // if an explicit public IP is specified, do not try to find others
         if (args.SoftSetBoolArg("-discover", false)) {
             LogPrintf("%s: parameter interaction: -externalip set -> setting "
                       "-discover=0\n",
                       __func__);
         }
     }
 
     // disable whitelistrelay in blocksonly mode
     if (args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
         if (args.SoftSetBoolArg("-whitelistrelay", false)) {
             LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting "
                       "-whitelistrelay=0\n",
                       __func__);
         }
     }
 
     // Forcing relay from whitelisted hosts implies we will accept relays from
     // them in the first place.
     if (args.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
         if (args.SoftSetBoolArg("-whitelistrelay", true)) {
             LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> "
                       "setting -whitelistrelay=1\n",
                       __func__);
         }
     }
 
     // If avalanche is set, soft set all the feature flags accordingly.
     if (args.IsArgSet("-avalanche")) {
         const bool fAvalanche =
             args.GetBoolArg("-avalanche", AVALANCHE_DEFAULT_ENABLED);
         args.SoftSetBoolArg("-automaticunparking", !fAvalanche);
     }
 }
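
  // Example cascade (hypothetical address): passing only
  // -proxy=127.0.0.1:9050 soft-sets -listen=0 above, and the -listen=0
  // branch additionally soft-sets -listenonion=0 and -i2pacceptincoming=0.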
 
 /**
  * Initialize global loggers.
  *
  * Note that this is called very early in the process lifetime, so you should be
  * careful about what global state you rely on here.
  */
 void InitLogging(const ArgsManager &args) {
     init::SetLoggingOptions(args);
     init::LogPackageVersion();
 }
 
 namespace { // Variables internal to initialization process only
 
 int nMaxConnections;
 int nUserMaxConnections;
 int nFD;
 ServiceFlags nLocalServices = ServiceFlags(NODE_NETWORK | NODE_NETWORK_LIMITED);
 int64_t peer_connect_timeout;
 std::set<BlockFilterType> g_enabled_filter_types;
 
 } // namespace
 
 [[noreturn]] static void new_handler_terminate() {
      // Rather than throwing std::bad_alloc if allocation fails, terminate
     // immediately to (try to) avoid chain corruption. Since LogPrintf may
     // itself allocate memory, set the handler directly to terminate first.
     std::set_new_handler(std::terminate);
     LogPrintf("Error: Out of memory. Terminating.\n");
 
     // The log was successful, terminate now.
     std::terminate();
  }
 
 bool AppInitBasicSetup(const ArgsManager &args) {
 // Step 1: setup
 #ifdef _MSC_VER
     // Turn off Microsoft heap dump noise
     _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
     _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, nullptr,
                                              OPEN_EXISTING, 0, 0));
     // Disable confusing "helpful" text message on abort, Ctrl-C
     _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
 #endif
 #ifdef WIN32
     // Enable Data Execution Prevention (DEP)
     SetProcessDEPPolicy(PROCESS_DEP_ENABLE);
 #endif
     if (!InitShutdownState()) {
         return InitError(
             Untranslated("Initializing wait-for-shutdown state failed."));
     }
 
     if (!SetupNetworking()) {
         return InitError(Untranslated("Initializing networking failed"));
     }
 
 #ifndef WIN32
     if (!args.GetBoolArg("-sysperms", false)) {
         umask(077);
     }
 
     // Clean shutdown on SIGTERM
     registerSignalHandler(SIGTERM, HandleSIGTERM);
     registerSignalHandler(SIGINT, HandleSIGTERM);
 
     // Reopen debug.log on SIGHUP
     registerSignalHandler(SIGHUP, HandleSIGHUP);
 
     // Ignore SIGPIPE, otherwise it will bring the daemon down if the client
     // closes unexpectedly
     signal(SIGPIPE, SIG_IGN);
 #else
     SetConsoleCtrlHandler(consoleCtrlHandler, true);
 #endif
 
     std::set_new_handler(new_handler_terminate);
 
     return true;
 }
 
 bool AppInitParameterInteraction(Config &config, const ArgsManager &args) {
     const CChainParams &chainparams = config.GetChainParams();
     // Step 2: parameter interactions
 
     // also see: InitParameterInteraction()
 
     // Error if network-specific options (-addnode, -connect, etc) are
     // specified in default section of config file, but not overridden
     // on the command line or in this network's section of the config file.
     std::string network = args.GetChainName();
     bilingual_str errors;
     for (const auto &arg : args.GetUnsuitableSectionOnlyArgs()) {
         errors += strprintf(_("Config setting for %s only applied on %s "
                               "network when in [%s] section.") +
                                 Untranslated("\n"),
                             arg, network, network);
     }
 
     if (!errors.empty()) {
         return InitError(errors);
     }
 
     // Warn if unrecognized section names are present in the config file.
     bilingual_str warnings;
     for (const auto &section : args.GetUnrecognizedSections()) {
         warnings += strprintf(Untranslated("%s:%i ") +
                                   _("Section [%s] is not recognized.") +
                                   Untranslated("\n"),
                               section.m_file, section.m_line, section.m_name);
     }
 
     if (!warnings.empty()) {
         InitWarning(warnings);
     }
 
     if (!fs::is_directory(args.GetBlocksDirPath())) {
         return InitError(
             strprintf(_("Specified blocks directory \"%s\" does not exist."),
                       args.GetArg("-blocksdir", "")));
     }
 
     // parse and validate enabled filter types
     std::string blockfilterindex_value =
         args.GetArg("-blockfilterindex", DEFAULT_BLOCKFILTERINDEX);
     if (blockfilterindex_value == "" || blockfilterindex_value == "1") {
         g_enabled_filter_types = AllBlockFilterTypes();
     } else if (blockfilterindex_value != "0") {
         const std::vector<std::string> names =
             args.GetArgs("-blockfilterindex");
         for (const auto &name : names) {
             BlockFilterType filter_type;
             if (!BlockFilterTypeByName(name, filter_type)) {
                 return InitError(
                     strprintf(_("Unknown -blockfilterindex value %s."), name));
             }
             g_enabled_filter_types.insert(filter_type);
         }
     }
 
     // Signal NODE_COMPACT_FILTERS if peerblockfilters and basic filters index
     // are both enabled.
     if (args.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS)) {
         if (g_enabled_filter_types.count(BlockFilterType::BASIC) != 1) {
             return InitError(
                 _("Cannot set -peerblockfilters without -blockfilterindex."));
         }
 
         nLocalServices = ServiceFlags(nLocalServices | NODE_COMPACT_FILTERS);
     }
 
     // if using block pruning, then disallow txindex, coinstatsindex and chronik
     if (args.GetIntArg("-prune", 0)) {
         if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
             return InitError(_("Prune mode is incompatible with -txindex."));
         }
         if (args.GetBoolArg("-coinstatsindex", DEFAULT_COINSTATSINDEX)) {
             return InitError(
                 _("Prune mode is incompatible with -coinstatsindex."));
         }
         if (args.GetBoolArg("-chronik", DEFAULT_CHRONIK)) {
             return InitError(_("Prune mode is incompatible with -chronik."));
         }
     }
 
     // -bind and -whitebind can't be set when not listening
     size_t nUserBind =
         args.GetArgs("-bind").size() + args.GetArgs("-whitebind").size();
     if (nUserBind != 0 && !args.GetBoolArg("-listen", DEFAULT_LISTEN)) {
         return InitError(Untranslated(
             "Cannot set -bind or -whitebind together with -listen=0"));
     }
 
     // Make sure enough file descriptors are available
     int nBind = std::max(nUserBind, size_t(1));
     nUserMaxConnections =
         args.GetIntArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
     nMaxConnections = std::max(nUserMaxConnections, 0);
 
     // -maxavalancheoutbound takes precedence over -maxconnections
     const int maxAvalancheOutbound = args.GetIntArg(
         "-maxavalancheoutbound", DEFAULT_MAX_AVALANCHE_OUTBOUND_CONNECTIONS);
     if (isAvalancheEnabled(args) && maxAvalancheOutbound > nMaxConnections) {
         nMaxConnections = std::max(maxAvalancheOutbound, nMaxConnections);
         // Log the value originally set by the user alongside the raised one
         LogPrintf("Increasing -maxconnections from %d to %d to comply with "
                   "-maxavalancheoutbound\n",
                   nUserMaxConnections, nMaxConnections);
     }
 
     // Trim requested connection counts, to fit into system limitations
     // <int> in std::min<int>(...) to work around FreeBSD compilation issue
     // described in #2695
     nFD = RaiseFileDescriptorLimit(
         nMaxConnections + nBind + MIN_CORE_FILEDESCRIPTORS +
         MAX_ADDNODE_CONNECTIONS + NUM_FDS_MESSAGE_CAPTURE);
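     // With poll() every raised descriptor is usable; select() is additionally
     // capped by FD_SETSIZE.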
 #ifdef USE_POLL
     int fd_max = nFD;
 #else
     int fd_max = FD_SETSIZE;
 #endif
     nMaxConnections = std::max(
         std::min<int>(nMaxConnections,
                       fd_max - nBind - MIN_CORE_FILEDESCRIPTORS -
                           MAX_ADDNODE_CONNECTIONS - NUM_FDS_MESSAGE_CAPTURE),
         0);
     if (nFD < MIN_CORE_FILEDESCRIPTORS) {
         return InitError(_("Not enough file descriptors available."));
     }
     nMaxConnections =
         std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS,
                  nMaxConnections);
 
     if (nMaxConnections < nUserMaxConnections) {
         // Not categorizing as "Warning" because this is the normal behavior for
         // platforms using the select() interface for which FD_SETSIZE is
         // usually 1024.
         LogPrintf("Reducing -maxconnections from %d to %d, because of system "
                   "limitations.\n",
                   nUserMaxConnections, nMaxConnections);
     }
 
     // Step 3: parameter-to-internal-flags
     init::SetLoggingCategories(args);
 
     // Configure excessive block size.
     const int64_t nProposedExcessiveBlockSize =
         args.GetIntArg("-excessiveblocksize", DEFAULT_MAX_BLOCK_SIZE);
     if (nProposedExcessiveBlockSize <= 0 ||
         !config.SetMaxBlockSize(nProposedExcessiveBlockSize)) {
         return InitError(
             _("Excessive block size must be > 1,000,000 bytes (1MB)"));
     }
 
     // Check blockmaxsize does not exceed maximum accepted block size.
     const int64_t nProposedMaxGeneratedBlockSize =
         args.GetIntArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE);
     if (nProposedMaxGeneratedBlockSize <= 0) {
         return InitError(_("Max generated block size must be greater than 0"));
     }
     if (uint64_t(nProposedMaxGeneratedBlockSize) > config.GetMaxBlockSize()) {
         return InitError(_("Max generated block size (blockmaxsize) cannot "
                            "exceed the excessive block size "
                            "(excessiveblocksize)"));
     }
 
     nConnectTimeout = args.GetIntArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
     if (nConnectTimeout <= 0) {
         nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
     }
 
     peer_connect_timeout =
         args.GetIntArg("-peertimeout", DEFAULT_PEER_CONNECT_TIMEOUT);
     if (peer_connect_timeout <= 0) {
         return InitError(Untranslated(
             "peertimeout cannot be configured with a negative value."));
     }
 
     // Sanity check argument for min fee for including tx in block
     // TODO: Harmonize which arguments need sanity checking and where that
     // happens.
     if (args.IsArgSet("-blockmintxfee")) {
         Amount n = Amount::zero();
         if (!ParseMoney(args.GetArg("-blockmintxfee", ""), n)) {
             return InitError(AmountErrMsg("blockmintxfee",
                                           args.GetArg("-blockmintxfee", "")));
         }
     }
 
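     // -bytespersigcheck supersedes the legacy -bytespersigop spelling; if
     // neither is set, the compiled-in default is kept.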
     nBytesPerSigCheck =
         args.IsArgSet("-bytespersigcheck")
             ? args.GetIntArg("-bytespersigcheck", nBytesPerSigCheck)
             : args.GetIntArg("-bytespersigop", nBytesPerSigCheck);
 
     if (!g_wallet_init_interface.ParameterInteraction()) {
         return false;
     }
 
     // Option to startup with mocktime set (used for regression testing):
     SetMockTime(args.GetIntArg("-mocktime", 0)); // SetMockTime(0) is a no-op
 
     if (args.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS)) {
         nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
     }
 
     if (args.IsArgSet("-proxy") && args.GetArg("-proxy", "").empty()) {
         return InitError(_(
             "No proxy server specified. Use -proxy=<ip> or -proxy=<ip:port>."));
     }
 
     // Avalanche parameters
     const int64_t stakeUtxoMinConfirmations =
         args.GetIntArg("-avaproofstakeutxoconfirmations",
                        AVALANCHE_DEFAULT_STAKE_UTXO_CONFIRMATIONS);
 
     if (!chainparams.IsTestChain() &&
         stakeUtxoMinConfirmations !=
             AVALANCHE_DEFAULT_STAKE_UTXO_CONFIRMATIONS) {
         return InitError(_("Avalanche stake UTXO minimum confirmations can "
                            "only be set on test chains."));
     }
 
     if (stakeUtxoMinConfirmations <= 0) {
         return InitError(_("Avalanche stake UTXO minimum confirmations must be "
                            "a positive integer."));
     }
 
     if (args.IsArgSet("-avaproofstakeutxodustthreshold")) {
         Amount amount = Amount::zero();
         auto parsed = ParseMoney(
             args.GetArg("-avaproofstakeutxodustthreshold", ""), amount);
         if (!parsed || Amount::zero() == amount) {
             return InitError(AmountErrMsg(
                 "avaproofstakeutxodustthreshold",
                 args.GetArg("-avaproofstakeutxodustthreshold", "")));
         }
 
         if (!chainparams.IsTestChain() &&
             amount != avalanche::PROOF_DUST_THRESHOLD) {
             return InitError(_("Avalanche stake UTXO dust threshold can "
                                "only be set on test chains."));
         }
     }
 
     // This is a staking node
     if (isAvalancheEnabled(args) && args.IsArgSet("-avaproof")) {
         if (!args.GetBoolArg("-listen", true)) {
             return InitError(_("Running a staking node requires accepting "
                                "inbound connections. Please enable -listen."));
         }
         if (args.IsArgSet("-proxy")) {
             return InitError(_("Running a staking node behind a proxy is not "
                                "supported. Please disable -proxy."));
         }
         if (args.IsArgSet("-i2psam")) {
             return InitError(_("Running a staking node behind I2P is not "
                                "supported. Please disable -i2psam."));
         }
         if (args.IsArgSet("-onlynet")) {
             return InitError(
                 _("Restricting the outbound network is not supported when "
                   "running a staking node. Please disable -onlynet."));
         }
     }
 
     // Also report errors from parsing before daemonization
     {
         KernelNotifications notifications{};
         ChainstateManager::Options chainman_opts_dummy{
             .config = config,
             .datadir = args.GetDataDirNet(),
             .notifications = notifications,
         };
         if (const auto error{ApplyArgsManOptions(args, chainman_opts_dummy)}) {
             return InitError(*error);
         }
         BlockManager::Options blockman_opts_dummy{
             .chainparams = chainman_opts_dummy.config.GetChainParams(),
             .blocks_dir = args.GetBlocksDirPath(),
         };
         if (const auto error{ApplyArgsManOptions(args, blockman_opts_dummy)}) {
             return InitError(*error);
         }
     }
 
     return true;
 }
 
 static bool LockDataDirectory(bool probeOnly) {
     // Make sure only a single Bitcoin process is using the data directory.
     fs::path datadir = gArgs.GetDataDirNet();
     if (!DirIsWritable(datadir)) {
         return InitError(strprintf(
             _("Cannot write to data directory '%s'; check permissions."),
             fs::PathToString(datadir)));
     }
     if (!LockDirectory(datadir, ".lock", probeOnly)) {
         return InitError(strprintf(_("Cannot obtain a lock on data directory "
                                      "%s. %s is probably already running."),
                                    fs::PathToString(datadir), PACKAGE_NAME));
     }
     return true;
 }
 
 bool AppInitSanityChecks() {
     // Step 4: sanity checks
 
     init::SetGlobals();
 
     // Sanity check
     if (!init::SanityChecks()) {
         return InitError(strprintf(
             _("Initialization sanity check failed. %s is shutting down."),
             PACKAGE_NAME));
     }
 
     // Probe the data directory lock to give an early error message, if
     // possible. We cannot hold the data directory lock here, as the forking
     // for daemon() hasn't happened yet, and a fork would interact badly with
     // a held lock.
     return LockDataDirectory(true);
 }
 
 bool AppInitLockDataDirectory() {
     // After daemonization get the data directory lock again and hold on to it
     // until exit. This creates a slight window for a race condition to
     // happen; however, the race is harmless: it will at most make us exit
     // without printing a message to the console.
     if (!LockDataDirectory(false)) {
         // Detailed error printed inside LockDataDirectory
         return false;
     }
     return true;
 }
 
 bool AppInitInterfaces(NodeContext &node) {
     node.chain = interfaces::MakeChain(node, Params());
     // Create client interfaces for wallets that are supposed to be loaded
     // according to -wallet and -disablewallet options. This only constructs
     // the interfaces, it doesn't load wallet data. Wallets actually get loaded
     // when load() and start() interface methods are called below.
     g_wallet_init_interface.Construct(node);
     return true;
 }
 
 bool AppInitMain(Config &config, RPCServer &rpcServer,
                  HTTPRPCRequestProcessor &httpRPCRequestProcessor,
                  NodeContext &node,
                  interfaces::BlockAndHeaderTipInfo *tip_info) {
     // Step 4a: application initialization
     const ArgsManager &args = *Assert(node.args);
     const CChainParams &chainparams = config.GetChainParams();
 
     if (!CreatePidFile(args)) {
         // Detailed error printed inside CreatePidFile().
         return false;
     }
     if (!init::StartLogging(args)) {
         // Detailed error printed inside StartLogging().
         return false;
     }
 
     LogPrintf("Using at most %i automatic connections (%i file descriptors "
               "available)\n",
               nMaxConnections, nFD);
 
     // Warn about relative -datadir path.
     if (args.IsArgSet("-datadir") &&
         !args.GetPathArg("-datadir").is_absolute()) {
         LogPrintf("Warning: relative datadir option '%s' specified, which will "
                   "be interpreted relative to the current working directory "
                   "'%s'. This is fragile, because if bitcoin is started in the "
                   "future from a different location, it will be unable to "
                   "locate the current data files. There could also be data "
                   "loss if bitcoin is started while in a temporary "
                   "directory.\n",
                   args.GetArg("-datadir", ""),
                   fs::PathToString(fs::current_path()));
     }
 
     ValidationCacheSizes validation_cache_sizes{};
     ApplyArgsManOptions(args, validation_cache_sizes);
 
     if (!InitSignatureCache(validation_cache_sizes.signature_cache_bytes)) {
         return InitError(strprintf(
             _("Unable to allocate memory for -maxsigcachesize: '%s' MiB"),
             args.GetIntArg("-maxsigcachesize",
                            DEFAULT_MAX_SIG_CACHE_BYTES >> 20)));
     }
     if (!InitScriptExecutionCache(
             validation_cache_sizes.script_execution_cache_bytes)) {
         return InitError(strprintf(
             _("Unable to allocate memory for -maxscriptcachesize: '%s' MiB"),
             args.GetIntArg("-maxscriptcachesize",
                            DEFAULT_MAX_SCRIPT_CACHE_BYTES >> 20)));
     }
 
     int script_threads = args.GetIntArg("-par", DEFAULT_SCRIPTCHECK_THREADS);
     if (script_threads <= 0) {
         // -par=0 means autodetect (number of cores - 1 script threads)
         // -par=-n means "leave n cores free" (number of cores - n - 1 script
         // threads)
         script_threads += GetNumCores();
     }
 
     // Subtract 1 because the main thread counts towards the par threads
     script_threads = std::max(script_threads - 1, 0);
 
     // Number of script-checking threads <= MAX_SCRIPTCHECK_THREADS
     script_threads = std::min(script_threads, MAX_SCRIPTCHECK_THREADS);
 
     LogPrintf("Script verification uses %d additional threads\n",
               script_threads);
     if (script_threads >= 1) {
         StartScriptCheckWorkerThreads(script_threads);
     }
 
     assert(!node.scheduler);
     node.scheduler = std::make_unique<CScheduler>();
 
     // Start the lightweight task scheduler thread
     node.scheduler->m_service_thread =
         std::thread(&util::TraceThread, "scheduler",
                     [&] { node.scheduler->serviceQueue(); });
 
     // Gather some entropy once per minute.
     node.scheduler->scheduleEvery(
         [] {
             RandAddPeriodic();
             return true;
         },
         std::chrono::minutes{1});
 
     GetMainSignals().RegisterBackgroundSignalScheduler(*node.scheduler);
 
     /**
      * Register RPC commands regardless of -server setting so they will be
      * available in the GUI RPC console even if external calls are disabled.
      */
     RegisterAllRPCCommands(config, rpcServer, tableRPC);
     for (const auto &client : node.chain_clients) {
         client->registerRpcs();
     }
 #if ENABLE_ZMQ
     RegisterZMQRPCCommands(tableRPC);
 #endif
 
     /**
      * Start the RPC server.  It will be started in "warmup" mode and not
      * process calls yet (but it will verify that the server is there and will
      * be ready later).  Warmup mode will be completed when initialisation is
      * finished.
      */
     if (args.GetBoolArg("-server", false)) {
         uiInterface.InitMessage_connect(SetRPCWarmupStatus);
         if (!AppInitServers(config, httpRPCRequestProcessor, node)) {
             return InitError(
                 _("Unable to start HTTP server. See debug log for details."));
         }
     }
 
     // Step 5: verify wallet database integrity
     for (const auto &client : node.chain_clients) {
         if (!client->verify()) {
             return false;
         }
     }
 
     // Step 6: network initialization
 
     // Note that we absolutely cannot open any actual connections
     // until the very end ("start node") as the UTXO/block state
     // is not yet setup and may end up being set up twice if we
     // need to reindex later.
 
     fListen = args.GetBoolArg("-listen", DEFAULT_LISTEN);
     fDiscover = args.GetBoolArg("-discover", true);
 
     {
         // Initialize addrman
         assert(!node.addrman);
 
         // Read asmap file if configured
         std::vector<bool> asmap;
         if (args.IsArgSet("-asmap")) {
             fs::path asmap_path =
                 args.GetPathArg("-asmap", DEFAULT_ASMAP_FILENAME);
             if (!asmap_path.is_absolute()) {
                 asmap_path = args.GetDataDirNet() / asmap_path;
             }
             if (!fs::exists(asmap_path)) {
                 InitError(strprintf(_("Could not find asmap file %s"),
                                     fs::quoted(fs::PathToString(asmap_path))));
                 return false;
             }
             asmap = DecodeAsmap(asmap_path);
             if (asmap.size() == 0) {
                 InitError(strprintf(_("Could not parse asmap file %s"),
                                     fs::quoted(fs::PathToString(asmap_path))));
                 return false;
             }
             const uint256 asmap_version = SerializeHash(asmap);
             LogPrintf("Using asmap version %s for IP bucketing\n",
                       asmap_version.ToString());
         } else {
             LogPrintf("Using /16 prefix for IP bucketing\n");
         }
 
         uiInterface.InitMessage(_("Loading P2P addresses...").translated);
         auto addrman{LoadAddrman(chainparams, asmap, args)};
         if (!addrman) {
             return InitError(util::ErrorString(addrman));
         }
         node.addrman = std::move(*addrman);
     }
 
     assert(!node.banman);
     node.banman = std::make_unique<BanMan>(
         args.GetDataDirNet() / "banlist.dat", config.GetChainParams(),
         &uiInterface, args.GetIntArg("-bantime", DEFAULT_MISBEHAVING_BANTIME));
     assert(!node.connman);
     node.connman = std::make_unique<CConnman>(
         config, GetRand<uint64_t>(), GetRand<uint64_t>(), *node.addrman,
         args.GetBoolArg("-networkactive", true));
 
     // sanitize comments per BIP-0014, format user agent and check total size
     std::vector<std::string> uacomments;
     for (const std::string &cmt : args.GetArgs("-uacomment")) {
         if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT)) {
             return InitError(strprintf(
                 _("User Agent comment (%s) contains unsafe characters."), cmt));
         }
         uacomments.push_back(cmt);
     }
     const std::string client_name = args.GetArg("-uaclientname", CLIENT_NAME);
     const std::string client_version =
         args.GetArg("-uaclientversion", FormatVersion(CLIENT_VERSION));
     if (client_name != SanitizeString(client_name, SAFE_CHARS_UA_COMMENT)) {
         return InitError(strprintf(
             _("-uaclientname (%s) contains invalid characters."), client_name));
     }
     if (client_version !=
         SanitizeString(client_version, SAFE_CHARS_UA_COMMENT)) {
         return InitError(
             strprintf(_("-uaclientversion (%s) contains invalid characters."),
                       client_version));
     }
     const std::string strSubVersion =
         FormatUserAgent(client_name, client_version, uacomments);
     if (strSubVersion.size() > MAX_SUBVERSION_LENGTH) {
         return InitError(strprintf(
             _("Total length of network version string (%i) exceeds maximum "
               "length (%i). Reduce the number or size of uacomments."),
             strSubVersion.size(), MAX_SUBVERSION_LENGTH));
     }
 
     if (args.IsArgSet("-onlynet")) {
         std::set<enum Network> nets;
         for (const std::string &snet : args.GetArgs("-onlynet")) {
             enum Network net = ParseNetwork(snet);
             if (net == NET_UNROUTABLE) {
                 return InitError(strprintf(
                     _("Unknown network specified in -onlynet: '%s'"), snet));
             }
             nets.insert(net);
         }
         for (int n = 0; n < NET_MAX; n++) {
             enum Network net = (enum Network)n;
             if (!nets.count(net)) {
                 SetReachable(net, false);
             }
         }
     }
 
     // Check for host lookup allowed before parsing any network related
     // parameters
     fNameLookup = args.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP);
 
     bool proxyRandomize =
         args.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE);
     // -proxy sets a proxy for all outgoing network traffic
     // -noproxy (or -proxy=0) as well as the empty string can be used to not
     // set a proxy; this is the default.
     std::string proxyArg = args.GetArg("-proxy", "");
     SetReachable(NET_ONION, false);
     if (proxyArg != "" && proxyArg != "0") {
         CService proxyAddr;
         if (!Lookup(proxyArg, proxyAddr, 9050, fNameLookup)) {
             return InitError(strprintf(
                 _("Invalid -proxy address or hostname: '%s'"), proxyArg));
         }
 
         proxyType addrProxy = proxyType(proxyAddr, proxyRandomize);
         if (!addrProxy.IsValid()) {
             return InitError(strprintf(
                 _("Invalid -proxy address or hostname: '%s'"), proxyArg));
         }
 
         SetProxy(NET_IPV4, addrProxy);
         SetProxy(NET_IPV6, addrProxy);
         SetProxy(NET_ONION, addrProxy);
         SetNameProxy(addrProxy);
         // by default, -proxy sets onion as reachable, unless -noonion later
         SetReachable(NET_ONION, true);
     }
 
     // -onion can be used to set only a proxy for .onion, or override normal
     // proxy for .onion addresses.
     // -noonion (or -onion=0) disables connecting to .onion entirely. An empty
     // string is used to not override the onion proxy (in which case it defaults
     // to -proxy set above, or none)
     std::string onionArg = args.GetArg("-onion", "");
     if (onionArg != "") {
         if (onionArg == "0") {
             // Handle -noonion/-onion=0
             SetReachable(NET_ONION, false);
         } else {
             CService onionProxy;
             if (!Lookup(onionArg, onionProxy, 9050, fNameLookup)) {
                 return InitError(strprintf(
                     _("Invalid -onion address or hostname: '%s'"), onionArg));
             }
             proxyType addrOnion = proxyType(onionProxy, proxyRandomize);
             if (!addrOnion.IsValid()) {
                 return InitError(strprintf(
                     _("Invalid -onion address or hostname: '%s'"), onionArg));
             }
             SetProxy(NET_ONION, addrOnion);
             SetReachable(NET_ONION, true);
         }
     }
 
     for (const std::string &strAddr : args.GetArgs("-externalip")) {
         CService addrLocal;
         if (Lookup(strAddr, addrLocal, GetListenPort(), fNameLookup) &&
             addrLocal.IsValid()) {
             AddLocal(addrLocal, LOCAL_MANUAL);
         } else {
             return InitError(ResolveErrMsg("externalip", strAddr));
         }
     }
 
 #if ENABLE_ZMQ
     g_zmq_notification_interface = CZMQNotificationInterface::Create(
         [&chainman = node.chainman](CBlock &block, const CBlockIndex &index) {
             assert(chainman);
             return chainman->m_blockman.ReadBlockFromDisk(block, index);
         });
 
     if (g_zmq_notification_interface) {
         RegisterValidationInterface(g_zmq_notification_interface.get());
     }
 #endif
 
     // Step 7: load block chain
 
     node.notifications = std::make_unique<KernelNotifications>();
     fReindex = args.GetBoolArg("-reindex", false);
     bool fReindexChainState = args.GetBoolArg("-reindex-chainstate", false);
 
     ChainstateManager::Options chainman_opts{
         .config = config,
         .datadir = args.GetDataDirNet(),
         .adjusted_time_callback = GetAdjustedTime,
         .notifications = *node.notifications,
     };
     // no error can happen, already checked in AppInitParameterInteraction
     Assert(!ApplyArgsManOptions(args, chainman_opts));
 
     if (chainman_opts.checkpoints_enabled) {
         LogPrintf("Checkpoints will be verified.\n");
     } else {
         LogPrintf("Skipping checkpoint verification.\n");
     }
 
     BlockManager::Options blockman_opts{
         .chainparams = chainman_opts.config.GetChainParams(),
         .blocks_dir = args.GetBlocksDirPath(),
     };
     // no error can happen, already checked in AppInitParameterInteraction
     Assert(!ApplyArgsManOptions(args, blockman_opts));
 
     // cache size calculations
     CacheSizes cache_sizes =
         CalculateCacheSizes(args, g_enabled_filter_types.size());
 
     LogPrintf("Cache configuration:\n");
     LogPrintf("* Using %.1f MiB for block index database\n",
               cache_sizes.block_tree_db * (1.0 / 1024 / 1024));
     if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
         LogPrintf("* Using %.1f MiB for transaction index database\n",
                   cache_sizes.tx_index * (1.0 / 1024 / 1024));
     }
     for (BlockFilterType filter_type : g_enabled_filter_types) {
         LogPrintf("* Using %.1f MiB for %s block filter index database\n",
                   cache_sizes.filter_index * (1.0 / 1024 / 1024),
                   BlockFilterTypeName(filter_type));
     }
     LogPrintf("* Using %.1f MiB for chain state database\n",
               cache_sizes.coins_db * (1.0 / 1024 / 1024));
 
     assert(!node.mempool);
     assert(!node.chainman);
 
     CTxMemPool::Options mempool_opts{
         .check_ratio = chainparams.DefaultConsistencyChecks() ? 1 : 0,
     };
     if (const auto err{ApplyArgsManOptions(args, chainparams, mempool_opts)}) {
         return InitError(*err);
     }
     mempool_opts.check_ratio =
         std::clamp<int>(mempool_opts.check_ratio, 0, 1'000'000);
 
     // FIXME: this legacy limit comes from the DEFAULT_DESCENDANT_SIZE_LIMIT
     // (101) that was enforced before the wellington activation. While it's
     // still a good idea to have some minimum mempool size, using this value as
     // a threshold is no longer relevant.
     int64_t nMempoolSizeMin = 101 * 1000 * 40;
     if (mempool_opts.max_size_bytes < 0 ||
         (!chainparams.IsTestChain() &&
          mempool_opts.max_size_bytes < nMempoolSizeMin)) {
         return InitError(strprintf(_("-maxmempool must be at least %d MB"),
                                    std::ceil(nMempoolSizeMin / 1000000.0)));
     }
     LogPrintf("* Using %.1f MiB for in-memory UTXO set (plus up to %.1f MiB of "
               "unused mempool space)\n",
               cache_sizes.coins * (1.0 / 1024 / 1024),
               mempool_opts.max_size_bytes * (1.0 / 1024 / 1024));
 
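     // Attempt to load the chainstate, retrying after a user-approved reindex;
     // the loop exits on success, on fatal error, or when shutdown is
     // requested.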
     for (bool fLoaded = false; !fLoaded && !ShutdownRequested();) {
         node.mempool = std::make_unique<CTxMemPool>(mempool_opts);
 
         node.chainman =
             std::make_unique<ChainstateManager>(chainman_opts, blockman_opts);
         ChainstateManager &chainman = *node.chainman;
 
         node::ChainstateLoadOptions options;
         options.mempool = Assert(node.mempool.get());
         options.reindex = node::fReindex;
         options.reindex_chainstate = fReindexChainState;
         options.prune = chainman.m_blockman.IsPruneMode();
         options.check_blocks =
             args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
         options.check_level = args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL);
         options.require_full_verification =
             args.IsArgSet("-checkblocks") || args.IsArgSet("-checklevel");
         options.check_interrupt = ShutdownRequested;
         options.coins_error_cb = [] {
             uiInterface.ThreadSafeMessageBox(
                 _("Error reading from database, shutting down."), "",
                 CClientUIInterface::MSG_ERROR);
         };
 
         uiInterface.InitMessage(_("Loading block index...").translated);
 
         const int64_t load_block_index_start_time = GetTimeMillis();
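         // Convert any exception thrown while loading or verifying the
         // chainstate into a generic failure status, so the reindex prompt
         // below can offer recovery instead of aborting.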
         auto catch_exceptions = [](auto &&f) {
             try {
                 return f();
             } catch (const std::exception &e) {
                 LogPrintf("%s\n", e.what());
                 return std::make_tuple(node::ChainstateLoadStatus::FAILURE,
                                        _("Error opening block database"));
             }
         };
         auto [status, error] = catch_exceptions(
             [&] { return LoadChainstate(chainman, cache_sizes, options); });
         if (status == node::ChainstateLoadStatus::SUCCESS) {
             uiInterface.InitMessage(_("Verifying blocks...").translated);
             if (chainman.m_blockman.m_have_pruned &&
                 options.check_blocks > MIN_BLOCKS_TO_KEEP) {
                 LogPrintf("Prune: pruned datadir may not have more than %d "
                           "blocks; only checking available blocks\n",
                           MIN_BLOCKS_TO_KEEP);
             }
             std::tie(status, error) = catch_exceptions(
                 [&] { return VerifyLoadedChainstate(chainman, options); });
             if (status == node::ChainstateLoadStatus::SUCCESS) {
                 fLoaded = true;
                 LogPrintf(" block index %15dms\n",
                           GetTimeMillis() - load_block_index_start_time);
             }
         }
 
         if (status == node::ChainstateLoadStatus::FAILURE_FATAL ||
             status == node::ChainstateLoadStatus::FAILURE_INCOMPATIBLE_DB ||
             status ==
                 node::ChainstateLoadStatus::FAILURE_INSUFFICIENT_DBCACHE) {
             return InitError(error);
         }
 
         if (!fLoaded && !ShutdownRequested()) {
             // first suggest a reindex
             if (!options.reindex) {
                 bool fRet = uiInterface.ThreadSafeQuestion(
                     error + Untranslated(".\n\n") +
                         _("Do you want to rebuild the block database now?"),
                     error.original + ".\nPlease restart with -reindex or "
                                      "-reindex-chainstate to recover.",
                     "",
                     CClientUIInterface::MSG_ERROR |
                         CClientUIInterface::BTN_ABORT);
                 if (fRet) {
                     fReindex = true;
                     AbortShutdown();
                 } else {
                     LogPrintf("Aborted block database rebuild. Exiting.\n");
                     return false;
                 }
             } else {
                 return InitError(error);
             }
         }
     }
 
     // As LoadBlockIndex can take several minutes, it's possible the user
     // requested to kill the GUI during the last operation. If so, exit.
     // As the program has not fully started yet, Shutdown() is possibly
     // overkill.
     if (ShutdownRequested()) {
         LogPrintf("Shutdown requested. Exiting.\n");
         return false;
     }
 
     ChainstateManager &chainman = *Assert(node.chainman);
 
     // Initialize Avalanche.
     bilingual_str avalancheError;
     g_avalanche = avalanche::Processor::MakeProcessor(
         args, *node.chain, node.connman.get(), chainman, node.mempool.get(),
         *node.scheduler, avalancheError);
     if (!g_avalanche) {
         InitError(avalancheError);
         return false;
     }
 
     if (isAvalancheEnabled(args) &&
         g_avalanche->isAvalancheServiceAvailable()) {
         nLocalServices = ServiceFlags(nLocalServices | NODE_AVALANCHE);
     }
 
     assert(!node.peerman);
-    node.peerman = PeerManager::make(
-        *node.connman, *node.addrman, node.banman.get(), chainman,
-        *node.mempool, args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY));
+    node.peerman =
+        PeerManager::make(*node.connman, *node.addrman, node.banman.get(),
+                          chainman, *node.mempool, g_avalanche.get(),
+                          args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY));
     RegisterValidationInterface(node.peerman.get());
 
     // Encoded addresses using cashaddr instead of base58.
     // We do this by default to avoid confusion with BTC addresses.
     config.SetCashAddrEncoding(args.GetBoolArg("-usecashaddr", true));
 
     // Step 8: load indexers
     if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
         auto result{
             WITH_LOCK(cs_main, return CheckLegacyTxindex(*Assert(
                                    chainman.m_blockman.m_block_tree_db)))};
         if (!result) {
             return InitError(util::ErrorString(result));
         }
 
         g_txindex =
             std::make_unique<TxIndex>(cache_sizes.tx_index, false, fReindex);
         if (!g_txindex->Start(chainman.ActiveChainstate())) {
             return false;
         }
     }
 
     for (const auto &filter_type : g_enabled_filter_types) {
         InitBlockFilterIndex(filter_type, cache_sizes.filter_index, false,
                              fReindex);
         if (!GetBlockFilterIndex(filter_type)
                  ->Start(chainman.ActiveChainstate())) {
             return false;
         }
     }
 
     if (args.GetBoolArg("-coinstatsindex", DEFAULT_COINSTATSINDEX)) {
         g_coin_stats_index = std::make_unique<CoinStatsIndex>(
             /* cache size */ 0, false, fReindex);
         if (!g_coin_stats_index->Start(chainman.ActiveChainstate())) {
             return false;
         }
     }
 
 #if ENABLE_CHRONIK
     if (args.GetBoolArg("-chronik", DEFAULT_CHRONIK)) {
         const bool fReindexChronik =
             fReindex || args.GetBoolArg("-chronikreindex", false);
         if (!chronik::Start(config, node, fReindexChronik)) {
             return false;
         }
     }
 #endif
 
     // Step 9: load wallet
     for (const auto &client : node.chain_clients) {
         if (!client->load()) {
             return false;
         }
     }
 
     // Step 10: data directory maintenance
 
     // if pruning, unset the service bit and perform the initial blockstore
     // prune after any wallet rescanning has taken place.
     if (chainman.m_blockman.IsPruneMode()) {
         LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
         nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
         if (!fReindex) {
             LOCK(cs_main);
             for (Chainstate *chainstate : chainman.GetAll()) {
                 uiInterface.InitMessage(_("Pruning blockstore...").translated);
                 chainstate->PruneAndFlush();
             }
         }
     }
 
     // Step 11: import blocks
     if (!CheckDiskSpace(args.GetDataDirNet())) {
         InitError(
             strprintf(_("Error: Disk space is low for %s"),
                       fs::quoted(fs::PathToString(args.GetDataDirNet()))));
         return false;
     }
     if (!CheckDiskSpace(args.GetBlocksDirPath())) {
         InitError(
             strprintf(_("Error: Disk space is low for %s"),
                       fs::quoted(fs::PathToString(args.GetBlocksDirPath()))));
         return false;
     }
 
     // Either install a handler to notify us when genesis activates, or set
     // fHaveGenesis directly.
     // No locking, as this happens before any background thread is started.
     boost::signals2::connection block_notify_genesis_wait_connection;
     if (WITH_LOCK(chainman.GetMutex(),
                   return chainman.ActiveChain().Tip() == nullptr)) {
         block_notify_genesis_wait_connection =
             uiInterface.NotifyBlockTip_connect(
                 std::bind(BlockNotifyGenesisWait, std::placeholders::_2));
     } else {
         fHaveGenesis = true;
     }
 
 #if defined(HAVE_SYSTEM)
     const std::string block_notify = args.GetArg("-blocknotify", "");
     if (!block_notify.empty()) {
         uiInterface.NotifyBlockTip_connect([block_notify](
                                                SynchronizationState sync_state,
                                                const CBlockIndex *pBlockIndex) {
             if (sync_state != SynchronizationState::POST_INIT || !pBlockIndex) {
                 return;
             }
             std::string command = block_notify;
             ReplaceAll(command, "%s", pBlockIndex->GetBlockHash().GetHex());
             std::thread t(runCommand, command);
             // thread runs free
             t.detach();
         });
     }
 #endif
 
     std::vector<fs::path> vImportFiles;
     for (const std::string &strFile : args.GetArgs("-loadblock")) {
         vImportFiles.push_back(fs::PathFromString(strFile));
     }
 
     chainman.m_load_block =
         std::thread(&util::TraceThread, "loadblk", [=, &chainman, &args] {
             ThreadImport(chainman, vImportFiles,
                          ShouldPersistMempool(args) ? MempoolPath(args)
                                                     : fs::path{});
         });
 
     // Wait for genesis block to be processed
     {
         WAIT_LOCK(g_genesis_wait_mutex, lock);
         // We previously could hang here if StartShutdown() is called prior to
         // ThreadImport getting started, so instead we just wait on a timer to
         // check ShutdownRequested() regularly.
         while (!fHaveGenesis && !ShutdownRequested()) {
             g_genesis_wait_cv.wait_for(lock, std::chrono::milliseconds(500));
         }
         block_notify_genesis_wait_connection.disconnect();
     }
 
     if (ShutdownRequested()) {
         return false;
     }
 
     // Step 12: start node
 
     int chain_active_height;
 
     //// debug print
     {
         LOCK(cs_main);
         LogPrintf("block tree size = %u\n", chainman.BlockIndex().size());
         chain_active_height = chainman.ActiveChain().Height();
         if (tip_info) {
             tip_info->block_height = chain_active_height;
             tip_info->block_time =
                 chainman.ActiveChain().Tip()
                     ? chainman.ActiveChain().Tip()->GetBlockTime()
                     : chainman.GetParams().GenesisBlock().GetBlockTime();
             tip_info->verification_progress = GuessVerificationProgress(
                 chainman.GetParams().TxData(), chainman.ActiveChain().Tip());
         }
         if (tip_info && chainman.m_best_header) {
             tip_info->header_height = chainman.m_best_header->nHeight;
             tip_info->header_time = chainman.m_best_header->GetBlockTime();
         }
     }
     LogPrintf("nBestHeight = %d\n", chain_active_height);
     if (node.peerman) {
         node.peerman->SetBestHeight(chain_active_height);
     }
 
     // Map ports with UPnP or NAT-PMP.
     StartMapPort(args.GetBoolArg("-upnp", DEFAULT_UPNP),
                  args.GetBoolArg("-natpmp", DEFAULT_NATPMP));
 
     CConnman::Options connOptions;
     connOptions.nLocalServices = nLocalServices;
     connOptions.nMaxConnections = nMaxConnections;
     connOptions.m_max_avalanche_outbound =
         g_avalanche && isAvalancheEnabled(args)
             ? args.GetIntArg("-maxavalancheoutbound",
                              DEFAULT_MAX_AVALANCHE_OUTBOUND_CONNECTIONS)
             : 0;
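     // Avalanche outbound slots are reserved first; the full-relay and
     // block-relay-only outbound counts are carved out of the remaining
     // connection budget.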
     connOptions.m_max_outbound_full_relay = std::min(
         MAX_OUTBOUND_FULL_RELAY_CONNECTIONS,
         connOptions.nMaxConnections - connOptions.m_max_avalanche_outbound);
     connOptions.m_max_outbound_block_relay = std::min(
         MAX_BLOCK_RELAY_ONLY_CONNECTIONS,
         connOptions.nMaxConnections - connOptions.m_max_avalanche_outbound -
             connOptions.m_max_outbound_full_relay);
     connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS;
     connOptions.nMaxFeeler = MAX_FEELER_CONNECTIONS;
     connOptions.uiInterface = &uiInterface;
     connOptions.m_banman = node.banman.get();
     connOptions.m_msgproc.push_back(node.peerman.get());
     if (g_avalanche) {
         connOptions.m_msgproc.push_back(g_avalanche.get());
     }
     connOptions.nSendBufferMaxSize =
         1000 * args.GetIntArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
     connOptions.nReceiveFloodSize =
         1000 * args.GetIntArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
     connOptions.m_added_nodes = args.GetArgs("-addnode");
 
     connOptions.nMaxOutboundLimit =
         1024 * 1024 *
         args.GetIntArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET);
     connOptions.m_peer_connect_timeout = peer_connect_timeout;
 
     // Port to bind to if `-bind=addr` is provided without a `:port` suffix.
     const uint16_t default_bind_port = static_cast<uint16_t>(
         args.GetIntArg("-port", config.GetChainParams().GetDefaultPort()));
 
     const auto BadPortWarning = [](const char *prefix, uint16_t port) {
         return strprintf(_("%s request to listen on port %u. This port is "
                            "considered \"bad\" and "
                            "thus it is unlikely that any Bitcoin ABC peers "
                            "connect to it. See "
                            "doc/p2p-bad-ports.md for details and a full list."),
                          prefix, port);
     };
 
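     // A -bind value may carry a "=onion" suffix, in which case the address is
     // bound only for inbound connections arriving via the Tor onion service.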
     for (const std::string &bind_arg : args.GetArgs("-bind")) {
         CService bind_addr;
         const size_t index = bind_arg.rfind('=');
         if (index == std::string::npos) {
             if (Lookup(bind_arg, bind_addr, default_bind_port,
                        /*fAllowLookup=*/false)) {
                 connOptions.vBinds.push_back(bind_addr);
                 if (IsBadPort(bind_addr.GetPort())) {
                     InitWarning(BadPortWarning("-bind", bind_addr.GetPort()));
                 }
                 continue;
             }
         } else {
             const std::string network_type = bind_arg.substr(index + 1);
             if (network_type == "onion") {
                 const std::string truncated_bind_arg =
                     bind_arg.substr(0, index);
                 if (Lookup(truncated_bind_arg, bind_addr,
                            BaseParams().OnionServiceTargetPort(), false)) {
                     connOptions.onion_binds.push_back(bind_addr);
                     continue;
                 }
             }
         }
         return InitError(ResolveErrMsg("bind", bind_arg));
     }
 
     for (const std::string &strBind : args.GetArgs("-whitebind")) {
         NetWhitebindPermissions whitebind;
         bilingual_str error;
         if (!NetWhitebindPermissions::TryParse(strBind, whitebind, error)) {
             return InitError(error);
         }
         connOptions.vWhiteBinds.push_back(whitebind);
     }
 
     // If the user did not specify -bind= or -whitebind= then we bind
     // on any address - 0.0.0.0 (IPv4) and :: (IPv6).
     connOptions.bind_on_any =
         args.GetArgs("-bind").empty() && args.GetArgs("-whitebind").empty();
 
     // Emit a warning if a bad port is given to -port= but only if -bind and
     // -whitebind are not given, because if they are, then -port= is ignored.
     if (connOptions.bind_on_any && args.IsArgSet("-port")) {
         const uint16_t port_arg = args.GetIntArg("-port", 0);
         if (IsBadPort(port_arg)) {
             InitWarning(BadPortWarning("-port", port_arg));
         }
     }
 
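     // The first onion bind doubles as the target of the automatically created
     // Tor onion service; fall back to the default target when none is given.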
     CService onion_service_target;
     if (!connOptions.onion_binds.empty()) {
         onion_service_target = connOptions.onion_binds.front();
     } else {
         onion_service_target = DefaultOnionServiceTarget();
         connOptions.onion_binds.push_back(onion_service_target);
     }
 
     if (args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) {
         if (connOptions.onion_binds.size() > 1) {
             InitWarning(strprintf(
                 _("More than one onion bind address is provided. Using %s "
                   "for the automatically created Tor onion service."),
                 onion_service_target.ToStringIPPort()));
         }
         StartTorControl(onion_service_target);
     }
 
     if (connOptions.bind_on_any) {
         // Only add all IP addresses of the machine if we would be listening on
         // any address - 0.0.0.0 (IPv4) and :: (IPv6).
         Discover();
     }
 
     for (const auto &net : args.GetArgs("-whitelist")) {
         NetWhitelistPermissions subnet;
         bilingual_str error;
         if (!NetWhitelistPermissions::TryParse(net, subnet, error)) {
             return InitError(error);
         }
         connOptions.vWhitelistedRange.push_back(subnet);
     }
 
     connOptions.vSeedNodes = args.GetArgs("-seednode");
 
     // Initiate outbound connections unless connect=0
     connOptions.m_use_addrman_outgoing = !args.IsArgSet("-connect");
     if (!connOptions.m_use_addrman_outgoing) {
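         // -connect=0 alone disables all automatic outbound connections; any
         // other -connect values are treated as an explicit list of peers.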
         const auto connect = args.GetArgs("-connect");
         if (connect.size() != 1 || connect[0] != "0") {
             connOptions.m_specified_outgoing = connect;
         }
     }
 
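     // -i2psam points to an I2P SAM proxy; setting it marks the I2P network as
     // reachable for outbound connections.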
     const std::string &i2psam_arg = args.GetArg("-i2psam", "");
     if (!i2psam_arg.empty()) {
         CService addr;
         if (!Lookup(i2psam_arg, addr, 7656, fNameLookup) || !addr.IsValid()) {
             return InitError(strprintf(
                 _("Invalid -i2psam address or hostname: '%s'"), i2psam_arg));
         }
         SetReachable(NET_I2P, true);
         SetProxy(NET_I2P, proxyType{addr});
     } else {
         SetReachable(NET_I2P, false);
     }
 
     connOptions.m_i2p_accept_incoming =
         args.GetBoolArg("-i2pacceptincoming", true);
 
     if (!node.connman->Start(*node.scheduler, connOptions)) {
         return false;
     }
 
     // Step 13: finished
 
     // At this point, the RPC is "started", but still in warmup, which means it
     // cannot yet be called. Before we make it callable, we need to make sure
     // that the RPC's view of the best block is valid and consistent with
     // ChainstateManager's active tip.
     //
     // If we do not do this, RPC's view of the best block will be height=0 and
     // hash=0x0. This will lead to erroneous responses for things like
     // waitforblockheight.
     RPCNotifyBlockChange(
         WITH_LOCK(chainman.GetMutex(), return chainman.ActiveTip()));
     SetRPCWarmupFinished();
 
     uiInterface.InitMessage(_("Done loading").translated);
 
     for (const auto &client : node.chain_clients) {
         client->start(*node.scheduler);
     }
 
     BanMan *banman = node.banman.get();
     node.scheduler->scheduleEvery(
         [banman] {
             banman->DumpBanlist();
             return true;
         },
         DUMP_BANS_INTERVAL);
 
     // Start Avalanche's event loop.
     g_avalanche->startEventLoop(*node.scheduler);
 
     if (node.peerman) {
         node.peerman->StartScheduledTasks(*node.scheduler);
     }
 
 #if HAVE_SYSTEM
     StartupNotify(args);
 #endif
 
     return true;
 }
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 62a4178166..822609b001 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1,8404 +1,8409 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <net_processing.h>
 
 #include <addrman.h>
 #include <avalanche/avalanche.h>
 #include <avalanche/compactproofs.h>
 #include <avalanche/peermanager.h>
 #include <avalanche/processor.h>
 #include <avalanche/proof.h>
 #include <avalanche/statistics.h>
 #include <avalanche/validation.h>
 #include <banman.h>
 #include <blockencodings.h>
 #include <blockfilter.h>
 #include <blockvalidity.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <common/args.h>
 #include <config.h>
 #include <consensus/amount.h>
 #include <consensus/validation.h>
 #include <hash.h>
 #include <headerssync.h>
 #include <index/blockfilterindex.h>
 #include <invrequest.h>
 #include <kernel/mempool_entry.h>
 #include <merkleblock.h>
 #include <netbase.h>
 #include <netmessagemaker.h>
 #include <node/blockstorage.h>
 #include <policy/fees.h>
 #include <policy/policy.h>
 #include <policy/settings.h>
 #include <primitives/block.h>
 #include <primitives/transaction.h>
 #include <random.h>
 #include <reverse_iterator.h>
 #include <scheduler.h>
 #include <streams.h>
 #include <tinyformat.h>
 #include <txmempool.h>
 #include <txorphanage.h>
 #include <util/check.h> // For NDEBUG compile time check
 #include <util/strencodings.h>
 #include <util/trace.h>
 #include <validation.h>
 
 #include <algorithm>
 #include <atomic>
 #include <chrono>
 #include <functional>
 #include <future>
 #include <memory>
 #include <typeinfo>
 
 /** How long to cache transactions in mapRelay for normal relay */
 static constexpr auto RELAY_TX_CACHE_TIME = 15min;
 /**
  * How long a transaction has to be in the mempool before it can
  * unconditionally be relayed (even when not in mapRelay).
  */
 static constexpr auto UNCONDITIONAL_RELAY_DELAY = 2min;
 /**
  * Headers download timeout.
  * Timeout = base + per_header * (expected number of headers)
  */
 static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_BASE = 15min;
 static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
 /** How long to wait for a peer to respond to a getheaders request */
 static constexpr auto HEADERS_RESPONSE_TIME{2min};
 /**
  * Protect at least this many outbound peers from disconnection due to
  * slow/behind headers chain.
  */
 static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
 /** Timeout for (unprotected) outbound peers to sync to our chainwork */
 static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
 /** How frequently to check for stale tips */
 static constexpr auto STALE_CHECK_INTERVAL{10min};
 /** How frequently to check for extra outbound peers and disconnect. */
 static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
 /**
  * Minimum time an outbound-peer-eviction candidate must be connected for, in
  * order to evict
  */
 static constexpr auto MINIMUM_CONNECT_TIME{30s};
 /** SHA256("main address relay")[0:8] */
 static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
 /// Age after which a stale block will no longer be served if requested as
 /// protection against fingerprinting. Set to one month, denominated in seconds.
 static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
 /// Age after which a block is considered historical for purposes of rate
 /// limiting block relay. Set to one week, denominated in seconds.
 static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
 /**
  * Time between pings automatically sent out for latency probing and keepalive.
  */
 static constexpr auto PING_INTERVAL{2min};
 /** The maximum number of entries in a locator */
 static const unsigned int MAX_LOCATOR_SZ = 101;
 /** The maximum number of entries in an 'inv' protocol message */
 static const unsigned int MAX_INV_SZ = 50000;
 static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv),
               "Max protocol message length must be greater than largest "
               "possible INV message");
 
 /** Minimum time between two successive getavaaddr messages from the same peer */
 static constexpr auto GETAVAADDR_INTERVAL{2min};
 
 /**
  * If no proof was requested from a compact proof message after this timeout
  * expired, the proof radix tree can be cleaned up.
  */
 static constexpr auto AVALANCHE_AVAPROOFS_TIMEOUT{2min};
 
 struct DataRequestParameters {
     /**
      * Maximum number of in-flight data requests from a peer. It is not a
      * hard limit, but the threshold at which the overloaded_peer_delay kicks
      * in.
      */
     const size_t max_peer_request_in_flight;
 
     /**
      * Maximum number of inventories to consider for requesting, per peer. It
      * provides a reasonable DoS limit to per-peer memory usage spent on
      * announcements, while covering peers continuously sending INVs at the
      * maximum rate (by our own policy, see INVENTORY_BROADCAST_PER_SECOND) for
      * several minutes, while not receiving the actual data (from any peer) in
      * response to requests for them.
      */
     const size_t max_peer_announcements;
 
     /** How long to delay requesting data from non-preferred peers */
     const std::chrono::seconds nonpref_peer_delay;
 
     /**
      * How long to delay requesting data from overloaded peers (see
      * max_peer_request_in_flight).
      */
     const std::chrono::seconds overloaded_peer_delay;
 
     /**
      * How long to wait (in microseconds) before a data request from an
      * additional peer.
      */
     const std::chrono::microseconds getdata_interval;
 
     /**
      * Permission flags a peer requires to bypass the request limit tracking
      * and delay penalty.
      */
     const NetPermissionFlags bypass_request_limits_permissions;
 };
 
 static constexpr DataRequestParameters TX_REQUEST_PARAMS{
     100,                       // max_peer_request_in_flight
     5000,                      // max_peer_announcements
     std::chrono::seconds(2),   // nonpref_peer_delay
     std::chrono::seconds(2),   // overloaded_peer_delay
     std::chrono::seconds(60),  // getdata_interval
     NetPermissionFlags::Relay, // bypass_request_limits_permissions
 };
 
 static constexpr DataRequestParameters PROOF_REQUEST_PARAMS{
     100,                      // max_peer_request_in_flight
     5000,                     // max_peer_announcements
     std::chrono::seconds(2),  // nonpref_peer_delay
     std::chrono::seconds(2),  // overloaded_peer_delay
     std::chrono::seconds(60), // getdata_interval
     NetPermissionFlags::
         BypassProofRequestLimits, // bypass_request_limits_permissions
 };
 
 /**
  * Limit to avoid sending big packets. Not enforced when processing incoming
  * GETDATA, for compatibility.
  */
 static const unsigned int MAX_GETDATA_SZ = 1000;
 /**
  * Number of blocks that can be requested at any given time from a single peer.
  */
 static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
 /**
  * Default time during which a peer must stall block download progress before
  * being disconnected. The actual timeout is increased temporarily if peers
  * are disconnected for hitting the timeout.
  */
 static constexpr auto BLOCK_STALLING_TIMEOUT_DEFAULT{2s};
 /** Maximum timeout for stalling block download. */
 static constexpr auto BLOCK_STALLING_TIMEOUT_MAX{64s};
 /**
  * Number of headers sent in one getheaders result. We rely on the assumption
  * that if a peer sends fewer than this number, we reached its tip. Changing
  * this value is a protocol upgrade.
  */
 static const unsigned int MAX_HEADERS_RESULTS = 2000;
 /**
  * Maximum depth of blocks we're willing to serve as compact blocks to peers
  * when requested. For older blocks, a regular BLOCK response will be sent.
  */
 static const int MAX_CMPCTBLOCK_DEPTH = 5;
 /**
  * Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests
  * for.
  */
 static const int MAX_BLOCKTXN_DEPTH = 10;
 /**
  * Size of the "block download window": how far ahead of our current height
  * do we fetch? Larger windows tolerate larger download speed differences
  * between peers, but increase the potential degree of disordering of blocks
  * on disk (which makes reindexing and pruning harder). We'll probably want
  * to make this a per-peer adaptive value at some point.
  */
 static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024;
 /**
  * Block download timeout base, expressed in multiples of the block interval
  * (i.e. 10 min)
  */
 static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
 /**
  * Additional block download timeout per parallel downloading peer (i.e. 5 min)
  */
 static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
 /**
  * Maximum number of headers to announce when relaying blocks with headers
  * message.
  */
 static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
 /** Maximum number of unconnecting headers announcements before DoS score */
 static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10;
 /** Minimum blocks required to signal NODE_NETWORK_LIMITED */
 static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
 /**
  * Average delay between local address broadcasts.
  */
 static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
 /**
  * Average delay between peer address broadcasts.
  */
 static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
 /** Delay between rotating the peers we relay a particular address to */
 static constexpr auto ROTATE_ADDR_RELAY_DEST_INTERVAL{24h};
 /**
  * Average delay between trickled inventory transmissions for inbound peers.
  * Blocks and peers with NetPermissionFlags::NoBan permission bypass this.
  */
 static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
 /**
  * Maximum rate of inventory items to send per second.
  * Limits the impact of low-fee transaction floods.
  */
 static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
 /** Maximum number of inventory items to send per transmission. */
 static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB =
     INVENTORY_BROADCAST_PER_SECOND *
     count_seconds(INBOUND_INVENTORY_BROADCAST_INTERVAL);
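 /**
  * Editor's note (compile-time sketch, not part of the original patch): it
  * makes the arithmetic above explicit. At 7 items per second over the 5s
  * average inbound trickle interval, a transmission carries at most 35
  * inventory items per MB of block size.
  */
 static_assert(INVENTORY_BROADCAST_MAX_PER_MB == 35,
               "expected 7 items/s * 5s = 35 items per MB");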
 /** The number of most recently announced transactions a peer can request. */
 static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
 /**
  * Verify that INVENTORY_MAX_RECENT_RELAY is enough to cache everything
  * typically relayed before unconditional relay from the mempool kicks in. This
  * is only a lower bound, and it should be larger to account for higher inv rate
  * to outbound peers, and random variations in the broadcast mechanism.
  */
 static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND *
                                                 UNCONDITIONAL_RELAY_DELAY /
                                                 std::chrono::seconds{1},
               "INVENTORY_RELAY_MAX too low");
 
 /**
  * Average delay between feefilter broadcasts
  */
 static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
 /**
  * Maximum feefilter broadcast delay after significant change.
  */
 static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
 /**
  * Maximum number of compact filters that may be requested with one
  * getcfilters. See BIP 157.
  */
 static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
 /**
  * Maximum number of cf hashes that may be requested with one getcfheaders. See
  * BIP 157.
  */
 static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000;
 /**
  * The maximum percentage of addresses from our addrman to return in response
  * to a getaddr message.
  */
 static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23;
 /** The maximum number of address records permitted in an ADDR message. */
 static constexpr size_t MAX_ADDR_TO_SEND{1000};
 /**
  * The maximum rate of address records we're willing to process on average. Can
  * be bypassed using the NetPermissionFlags::Addr permission.
  */
 static constexpr double MAX_ADDR_RATE_PER_SECOND{0.1};
 /**
  * The soft limit of the address processing token bucket (the regular
  * MAX_ADDR_RATE_PER_SECOND based increments won't go above this, but the
  * MAX_ADDR_TO_SEND increment following GETADDR is exempt from this limit).
  */
 static constexpr size_t MAX_ADDR_PROCESSING_TOKEN_BUCKET{MAX_ADDR_TO_SEND};
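 
 /**
  * Editor's sketch (hypothetical helper, not in the original patch; the
  * SketchReplenishAddrTokens name is illustrative, and <algorithm>/<chrono>
  * are assumed available in this TU): how the token bucket above is typically
  * replenished before an `addr` message is processed. Tokens accrue at
  * MAX_ADDR_RATE_PER_SECOND and are capped at the soft limit; the actual
  * logic lives in ProcessMessage().
  */
 [[maybe_unused]] static double
 SketchReplenishAddrTokens(double current_tokens,
                           std::chrono::microseconds elapsed) {
     const double increment =
         std::chrono::duration<double>(elapsed).count() *
         MAX_ADDR_RATE_PER_SECOND;
     // The regular increments never push the bucket above its soft limit.
     return std::min(current_tokens + increment,
                     double(MAX_ADDR_PROCESSING_TOKEN_BUCKET));
 }
 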
 /** The compactblocks version we support. See BIP 152. */
 static constexpr uint64_t CMPCTBLOCKS_VERSION{1};
 
 inline size_t GetMaxAddrToSend() {
     return gArgs.GetIntArg("-maxaddrtosend", MAX_ADDR_TO_SEND);
 }
 
 // Internal stuff
 namespace {
 /**
  * Blocks that are in flight, and that are in the queue to be downloaded.
  */
 struct QueuedBlock {
     /**
      * BlockIndex. We must have this since we only request blocks when we've
      * already validated the header.
      */
     const CBlockIndex *pindex;
     /** Optional, used for CMPCTBLOCK downloads */
     std::unique_ptr<PartiallyDownloadedBlock> partialBlock;
 };
 
 /**
  * Data structure for an individual peer. This struct is not protected by
  * cs_main since it does not contain validation-critical data.
  *
  * Memory is owned by shared pointers and this object is destructed when
  * the refcount drops to zero.
  *
  * Mutexes inside this struct must not be held when locking m_peer_mutex.
  *
  * TODO: move most members from CNodeState to this structure.
  * TODO: move remaining application-layer data members from CNode to this
  * structure.
  */
 struct Peer {
     /** Same id as the CNode object for this peer */
     const NodeId m_id{0};
 
     /**
      * Services we offered to this peer.
      *
      * This is supplied by CConnman during peer initialization. It's const
      * because there is no protocol defined for renegotiating services
      * initially offered to a peer. The set of local services we offer should
      * not change after initialization.
      *
      * An interesting example of this is NODE_NETWORK and initial block
      * download: a node which starts up from scratch doesn't have any blocks
      * to serve, but still advertises NODE_NETWORK because it will eventually
      * fulfill this role after IBD completes. P2P code is written in such a
      * way that it can gracefully handle peers who don't make good on their
      * service advertisements.
      */
     const ServiceFlags m_our_services;
 
     /** Services this peer offered to us. */
     std::atomic<ServiceFlags> m_their_services{NODE_NONE};
 
     /** Protects misbehavior data members */
     Mutex m_misbehavior_mutex;
     /** Accumulated misbehavior score for this peer */
     int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
     /** Whether this peer should be disconnected and marked as discouraged
      * (unless it has NetPermissionFlags::NoBan permission). */
     bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
 
     /** Protects block inventory data members */
     Mutex m_block_inv_mutex;
     /**
      * List of blocks that we'll announce via an `inv` message.
      * There is no final sorting before sending, as they are always sent
      * immediately and in the order requested.
      */
     std::vector<BlockHash> m_blocks_for_inv_relay GUARDED_BY(m_block_inv_mutex);
     /**
      * Unfiltered list of blocks that we'd like to announce via a `headers`
      * message. If we can't announce via a `headers` message, we'll fall back to
      * announcing via `inv`.
      */
     std::vector<BlockHash>
         m_blocks_for_headers_relay GUARDED_BY(m_block_inv_mutex);
 
     /**
      * The final block hash that we sent in an `inv` message to this peer.
      * When the peer requests this block, we send an `inv` message to trigger
      * the peer to request the next sequence of block hashes.
      * Most peers use headers-first syncing, which doesn't use this
      * mechanism.
      */
     BlockHash m_continuation_block GUARDED_BY(m_block_inv_mutex){};
 
     /** This peer's reported block height when we connected */
     std::atomic<int> m_starting_height{-1};
 
     /** The pong reply we're expecting, or 0 if no pong expected. */
     std::atomic<uint64_t> m_ping_nonce_sent{0};
     /** When the last ping was sent, or 0 if no ping was ever sent */
     std::atomic<std::chrono::microseconds> m_ping_start{0us};
     /** Whether a ping has been requested by the user */
     std::atomic<bool> m_ping_queued{false};
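 
     // Editor's note (illustrative, not part of the original patch): when a
     // `pong` carrying m_ping_nonce_sent arrives, the latency sample is
     // roughly `now - m_ping_start`; both use mockable time, which is why
     // setmocktime can make pings appear to time out (see MaybeSendPing).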
 
     /**
      * The feerate in the most recent BIP133 `feefilter` message sent to the
      * peer.
      * It is *not* a p2p protocol violation for the peer to send us
      * transactions with a lower fee rate than this. See BIP133.
      */
     Amount m_fee_filter_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
         Amount::zero()};
     std::chrono::microseconds m_next_send_feefilter
         GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
 
     struct TxRelay {
         mutable RecursiveMutex m_bloom_filter_mutex;
         /**
          * Whether the peer wishes to receive transaction announcements.
          *
          * This is initially set based on the fRelay flag in the received
          * `version` message. If initially set to false, it can only be flipped
          * to true if we have offered the peer NODE_BLOOM services and it sends
          * us a `filterload` or `filterclear` message. See BIP37.
          */
         bool m_relay_txs GUARDED_BY(m_bloom_filter_mutex){false};
         /**
          * A bloom filter for which transactions to announce to the peer.
          * See BIP37.
          */
         std::unique_ptr<CBloomFilter>
             m_bloom_filter PT_GUARDED_BY(m_bloom_filter_mutex)
                 GUARDED_BY(m_bloom_filter_mutex){nullptr};
 
         /** A rolling bloom filter of all tx CInvs announced to this peer. */
         CRollingBloomFilter m_recently_announced_invs GUARDED_BY(
             NetEventsInterface::g_msgproc_mutex){INVENTORY_MAX_RECENT_RELAY,
                                                  0.000001};
 
         mutable RecursiveMutex m_tx_inventory_mutex;
         /**
          * A filter of all the txids that the peer has announced to us or we
          * have announced to the peer. We use this to avoid announcing
          * the same txid to a peer that already has the transaction.
          */
         CRollingBloomFilter m_tx_inventory_known_filter
             GUARDED_BY(m_tx_inventory_mutex){50000, 0.000001};
         /**
          * Set of transaction ids we still have to announce. We use the
          * mempool to sort transactions in dependency order before relay, so
          * this does not have to be sorted.
          */
         std::set<TxId> m_tx_inventory_to_send GUARDED_BY(m_tx_inventory_mutex);
         /**
          * Whether the peer has requested us to send our complete mempool. Only
          * permitted if the peer has NetPermissionFlags::Mempool.
          * See BIP35.
          */
         bool m_send_mempool GUARDED_BY(m_tx_inventory_mutex){false};
         /** The last time a BIP35 `mempool` request was serviced. */
         std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
         /**
          * The next time after which we will send an `inv` message containing
          * transaction announcements to this peer.
          */
         std::chrono::microseconds m_next_inv_send_time
             GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
 
         /**
          * Minimum fee rate with which to filter transaction announcements to
          * this node. See BIP133.
          */
         std::atomic<Amount> m_fee_filter_received{Amount::zero()};
     };
 
     /**
      * Initializes a TxRelay struct for this peer. Can be called at most once
      * for a peer.
      */
     TxRelay *SetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
         LOCK(m_tx_relay_mutex);
         Assume(!m_tx_relay);
         m_tx_relay = std::make_unique<Peer::TxRelay>();
         return m_tx_relay.get();
     };
 
     TxRelay *GetTxRelay() EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
         return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
     };
     const TxRelay *GetTxRelay() const
         EXCLUSIVE_LOCKS_REQUIRED(!m_tx_relay_mutex) {
         return WITH_LOCK(m_tx_relay_mutex, return m_tx_relay.get());
     };
 
     struct ProofRelay {
         mutable RecursiveMutex m_proof_inventory_mutex;
         std::set<avalanche::ProofId>
             m_proof_inventory_to_send GUARDED_BY(m_proof_inventory_mutex);
         // Prevent sending proof invs if the peer already knows about them
         CRollingBloomFilter m_proof_inventory_known_filter
             GUARDED_BY(m_proof_inventory_mutex){10000, 0.000001};
         /**
          * A rolling bloom filter of all proof CInvs announced to this peer.
          */
         CRollingBloomFilter m_recently_announced_proofs GUARDED_BY(
             NetEventsInterface::g_msgproc_mutex){INVENTORY_MAX_RECENT_RELAY,
                                                  0.000001};
         std::chrono::microseconds m_next_inv_send_time{0};
 
         RadixTree<const avalanche::Proof, avalanche::ProofRadixTreeAdapter>
             sharedProofs;
         std::atomic<std::chrono::seconds> lastSharedProofsUpdate{0s};
         std::atomic<bool> compactproofs_requested{false};
     };
 
     /**
      * Proof relay data. Will be a nullptr if we're not relaying
      * proofs with this peer.
      */
     const std::unique_ptr<ProofRelay> m_proof_relay;
 
     /**
      * A vector of addresses to send to the peer, limited to MAX_ADDR_TO_SEND.
      */
     std::vector<CAddress>
         m_addrs_to_send GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
     /**
      * Probabilistic filter to track recent addr messages relayed with this
      * peer. Used to avoid relaying redundant addresses to this peer.
      *
      * We initialize this filter for outbound peers (other than
      * block-relay-only connections) or when an inbound peer sends us an
      * address related message (ADDR, ADDRV2, GETADDR).
      *
      * Presence of this filter must correlate with m_addr_relay_enabled.
      */
     std::unique_ptr<CRollingBloomFilter>
         m_addr_known GUARDED_BY(NetEventsInterface::g_msgproc_mutex);
     /**
      * Whether we are participating in address relay with this connection.
      *
      * We set this bool to true for outbound peers (other than
      * block-relay-only connections), or when an inbound peer sends us an
      * address related message (ADDR, ADDRV2, GETADDR).
      *
      * We use this bool to decide whether a peer is eligible for gossiping
      * addr messages. This avoids relaying to peers that are unlikely to
      * forward them, effectively blackholing self announcements. Reasons a
      * peer might not support addr relay on the link include that it
      * connected to us as a block-relay-only peer or it is a light client.
      *
      * This field must correlate with whether m_addr_known has been
      * initialized.
      */
     std::atomic_bool m_addr_relay_enabled{false};
     /** Whether a getaddr request to this peer is outstanding. */
     bool m_getaddr_sent GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
     /** Guards address sending timers. */
     mutable Mutex m_addr_send_times_mutex;
     /** Time point to send the next ADDR message to this peer. */
     std::chrono::microseconds
         m_next_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
     /** Time point to possibly re-announce our local address to this peer. */
     std::chrono::microseconds
         m_next_local_addr_send GUARDED_BY(m_addr_send_times_mutex){0};
     /**
      * Whether the peer has signaled support for receiving ADDRv2 (BIP155)
      * messages, indicating a preference to receive ADDRv2 instead of ADDR ones.
      */
     std::atomic_bool m_wants_addrv2{false};
     /** Whether this peer has already sent us a getaddr message. */
     bool m_getaddr_recvd GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
     /** Guards m_addr_token_bucket */
     mutable Mutex m_addr_token_bucket_mutex;
     /**
      * Number of addresses that can be processed from this peer. Start at 1
      * to permit self-announcement.
      */
     double m_addr_token_bucket GUARDED_BY(m_addr_token_bucket_mutex){1.0};
     /** When m_addr_token_bucket was last updated */
     std::chrono::microseconds
         m_addr_token_timestamp GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
             GetTime<std::chrono::microseconds>()};
     /** Total number of addresses that were dropped due to rate limiting. */
     std::atomic<uint64_t> m_addr_rate_limited{0};
     /**
      * Total number of addresses that were processed (excludes rate-limited
      * ones).
      */
     std::atomic<uint64_t> m_addr_processed{0};
 
     /**
      * Whether we've sent this peer a getheaders in response to an inv prior to
      * initial-headers-sync completing
      */
     bool m_inv_triggered_getheaders_before_sync
         GUARDED_BY(NetEventsInterface::g_msgproc_mutex){false};
 
     /** Protects m_getdata_requests **/
     Mutex m_getdata_requests_mutex;
     /** Work queue of items requested by this peer **/
     std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
 
     /** Time of the last getheaders message to this peer */
     NodeClock::time_point m_last_getheaders_timestamp
         GUARDED_BY(NetEventsInterface::g_msgproc_mutex){};
 
     /** Protects m_headers_sync **/
     Mutex m_headers_sync_mutex;
     /**
      * Headers-sync state for this peer (eg for initial sync, or syncing large
      * reorgs)
      **/
     std::unique_ptr<HeadersSyncState>
         m_headers_sync PT_GUARDED_BY(m_headers_sync_mutex)
             GUARDED_BY(m_headers_sync_mutex){};
 
     /** Whether we've sent our peer a sendheaders message. **/
     std::atomic<bool> m_sent_sendheaders{false};
 
     /** Length of the current streak of unconnecting headers announcements */
     int m_num_unconnecting_headers_msgs
         GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
 
     /** When to potentially disconnect peer for stalling headers download */
     std::chrono::microseconds m_headers_sync_timeout
         GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};
 
     /**
      * Whether this peer wants invs or headers (when possible) for block
      * announcements
      */
     bool m_prefers_headers GUARDED_BY(NetEventsInterface::g_msgproc_mutex){
         false};
 
     explicit Peer(NodeId id, ServiceFlags our_services)
         : m_id(id), m_our_services{our_services},
           m_proof_relay(isAvalancheEnabled(gArgs)
                             ? std::make_unique<ProofRelay>()
                             : nullptr) {}
 
 private:
     mutable Mutex m_tx_relay_mutex;
 
     /**
      * Transaction relay data. Will be a nullptr if we're not relaying
      * transactions with this peer (e.g. if it's a block-relay-only peer or
      * the peer has sent us fRelay=false with bloom filters disabled).
      */
     std::unique_ptr<TxRelay> m_tx_relay GUARDED_BY(m_tx_relay_mutex);
 };
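 
 /**
  * Editor's sketch (hypothetical helper, not part of the original patch; the
  * SketchAnnounceTx name is illustrative only): typical use of the TxRelay
  * accessors above. The relay data is created at most once via SetTxRelay()
  * and read through the locked getter; a nullptr means we don't relay
  * transactions with this peer.
  */
 [[maybe_unused]] static void SketchAnnounceTx(Peer &peer, const TxId &txid) {
     if (Peer::TxRelay *tx_relay = peer.GetTxRelay()) {
         LOCK(tx_relay->m_tx_inventory_mutex);
         tx_relay->m_tx_inventory_to_send.insert(txid);
     }
 }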
 
 using PeerRef = std::shared_ptr<Peer>;
 
 /**
  * Maintain validation-specific state about nodes, protected by cs_main,
  * instead of by CNode's own locks. This simplifies asynchronous operation,
  * where processing of incoming data is done after the ProcessMessage call
  * returns, and we're no longer holding the node's locks.
  */
 struct CNodeState {
     //! The best known block we know this peer has announced.
     const CBlockIndex *pindexBestKnownBlock{nullptr};
     //! The hash of the last unknown block this peer has announced.
     BlockHash hashLastUnknownBlock{};
     //! The last full block we both have.
     const CBlockIndex *pindexLastCommonBlock{nullptr};
     //! The best header we have sent our peer.
     const CBlockIndex *pindexBestHeaderSent{nullptr};
     //! Whether we've started headers synchronization with this peer.
     bool fSyncStarted{false};
     //! Since when we're stalling block download progress (in microseconds), or
     //! 0.
     std::chrono::microseconds m_stalling_since{0us};
     std::list<QueuedBlock> vBlocksInFlight;
     //! When the first entry in vBlocksInFlight started downloading. Don't care
     //! when vBlocksInFlight is empty.
     std::chrono::microseconds m_downloading_since{0us};
     int nBlocksInFlight{0};
     //! Whether we consider this a preferred download peer.
     bool fPreferredDownload{false};
     /**
      * Whether this peer wants invs or cmpctblocks (when possible) for block
      * announcements.
      */
     bool m_requested_hb_cmpctblocks{false};
     /** Whether this peer will send us cmpctblocks if we request them. */
     bool m_provides_cmpctblocks{false};
 
     /**
      * State used to enforce CHAIN_SYNC_TIMEOUT and EXTRA_PEER_CHECK_INTERVAL
      * logic.
      *
      * Both are only in effect for outbound, non-manual, non-protected
      * connections. Any peer protected (m_protect = true) is not chosen for
      * eviction. A peer is marked as protected if all of these are true:
      *   - its connection type is IsBlockOnlyConn() == false
      *   - it gave us a valid connecting header
      *   - we haven't reached MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT yet
      *   - it has a better chain than we have
      *
      * CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than
      * our tip, set a timeout CHAIN_SYNC_TIMEOUT in the future:
      *   - If at timeout their best known block now has more work than our
      * tip did when the timeout was set, then either reset the timeout or
      * clear it (after comparing against our current tip's work).
      *   - If at timeout their best known block still has less work than our
      * tip did when the timeout was set, then send a getheaders message and
      * set a shorter timeout, HEADERS_RESPONSE_TIME seconds in the future. If
      * their best known block is still behind when that new timeout is
      * reached, disconnect.
      *
      * EXTRA_PEER_CHECK_INTERVAL: after each interval, if we have too many
      * outbound peers, drop the outbound one that least recently announced us a
      * new block.
      */
     struct ChainSyncTimeoutState {
         //! A timeout used for checking whether our peer has sufficiently
         //! synced.
         std::chrono::seconds m_timeout{0s};
         //! A header with the work we require on our peer's chain.
         const CBlockIndex *m_work_header{nullptr};
         //! After timeout is reached, set to true after sending getheaders.
         bool m_sent_getheaders{false};
         //! Whether this peer is protected from disconnection due to a bad/slow
         //! chain.
         bool m_protect{false};
     };
 
     ChainSyncTimeoutState m_chain_sync;
 
     //! Time of last new block announcement
     int64_t m_last_block_announcement{0};
 
     //! Whether this peer is an inbound connection
     const bool m_is_inbound;
 
     CNodeState(bool is_inbound) : m_is_inbound(is_inbound) {}
 };
 
 class PeerManagerImpl final : public PeerManager {
 public:
     PeerManagerImpl(CConnman &connman, AddrMan &addrman, BanMan *banman,
                     ChainstateManager &chainman, CTxMemPool &pool,
+                    avalanche::Processor *const avalanche,
                     bool ignore_incoming_txs);
 
     /** Overridden from CValidationInterface. */
     void BlockConnected(const std::shared_ptr<const CBlock> &pblock,
                         const CBlockIndex *pindexConnected) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
     void BlockDisconnected(const std::shared_ptr<const CBlock> &block,
                            const CBlockIndex *pindex) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_recent_confirmed_transactions_mutex);
     void UpdatedBlockTip(const CBlockIndex *pindexNew,
                          const CBlockIndex *pindexFork,
                          bool fInitialDownload) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void BlockChecked(const CBlock &block,
                       const BlockValidationState &state) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void NewPoWValidBlock(const CBlockIndex *pindex,
                           const std::shared_ptr<const CBlock> &pblock) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
 
     /** Implement NetEventsInterface */
     void InitializeNode(const Config &config, CNode &node,
                         ServiceFlags our_services) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void FinalizeNode(const Config &config, const CNode &node) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest,
                                  !m_headers_presync_mutex);
     bool ProcessMessages(const Config &config, CNode *pfrom,
                          std::atomic<bool> &interrupt) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                  !m_recent_confirmed_transactions_mutex,
                                  !m_most_recent_block_mutex, !cs_proofrequest,
                                  !m_headers_presync_mutex, g_msgproc_mutex);
     bool SendMessages(const Config &config, CNode *pto) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                  !m_recent_confirmed_transactions_mutex,
                                  !m_most_recent_block_mutex, !cs_proofrequest,
                                  g_msgproc_mutex);
 
     /** Implement PeerManager */
     void StartScheduledTasks(CScheduler &scheduler) override;
     void CheckForStaleTipAndEvictPeers() override;
     std::optional<std::string>
     FetchBlock(const Config &config, NodeId peer_id,
                const CBlockIndex &block_index) override;
     bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) const override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
     void SendPings() override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void RelayTransaction(const TxId &txid) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void RelayProof(const avalanche::ProofId &proofid) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
     void SetBestHeight(int height) override { m_best_height = height; };
     void UnitTestMisbehaving(NodeId peer_id, const int howmuch) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) {
         Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, "");
     }
     void ProcessMessage(const Config &config, CNode &pfrom,
                         const std::string &msg_type, CDataStream &vRecv,
                         const std::chrono::microseconds time_received,
                         const std::atomic<bool> &interruptMsgProc) override
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex,
                                  !m_recent_confirmed_transactions_mutex,
                                  !m_most_recent_block_mutex, !cs_proofrequest,
                                  !m_headers_presync_mutex, g_msgproc_mutex);
     void UpdateLastBlockAnnounceTime(NodeId node,
                                      int64_t time_in_seconds) override;
 
 private:
     /**
      * Consider evicting an outbound peer based on the amount of time they've
      * been behind our tip.
      */
     void ConsiderEviction(CNode &pto, Peer &peer,
                           std::chrono::seconds time_in_seconds)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_msgproc_mutex);
 
     /**
      * If we have extra outbound peers, try to disconnect the one with the
      * oldest block announcement.
      */
     void EvictExtraOutboundPeers(std::chrono::seconds now)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Retrieve unbroadcast transactions from the mempool and reattempt
      * sending to peers
      */
     void ReattemptInitialBroadcast(CScheduler &scheduler)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Update the avalanche statistics for all the nodes
      */
     void UpdateAvalancheStatistics() const;
 
     /**
      * Process periodic avalanche network messaging and cleanups.
      */
     void AvalanchePeriodicNetworking(CScheduler &scheduler) const;
 
     /**
      * Get a shared pointer to the Peer object.
      * May return an empty shared_ptr if the Peer object can't be found.
      */
     PeerRef GetPeerRef(NodeId id) const EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Get a shared pointer to the Peer object and remove it from m_peer_map.
      * May return an empty shared_ptr if the Peer object can't be found.
      */
     PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Increment peer's misbehavior score. If the new value >=
      * DISCOURAGEMENT_THRESHOLD, mark the node to be discouraged, meaning the
      * peer might be disconnected and added to the discouragement filter.
      */
     void Misbehaving(Peer &peer, int howmuch, const std::string &message);
 
     /**
      * Potentially mark a node discouraged based on the contents of a
      * BlockValidationState object
      *
      * @param[in] via_compact_block this bool is passed in because
      * net_processing should punish peers differently depending on whether the
      * data was provided in a compact block message or not. If the compact block
      * had a valid header, but contained invalid txs, the peer should not be
      * punished. See BIP 152.
      *
      * @return Returns true if the peer was punished (probably disconnected)
      */
     bool MaybePunishNodeForBlock(NodeId nodeid,
                                  const BlockValidationState &state,
                                  bool via_compact_block,
                                  const std::string &message = "")
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Potentially disconnect and discourage a node based on the contents of a
      * TxValidationState object
      *
      * @return Returns true if the peer was punished (probably disconnected)
      */
     bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state,
                               const std::string &message = "")
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
 
     /**
      * Maybe disconnect a peer and discourage future connections from its
      * address.
      *
      * @param[in]   pnode     The node to check.
      * @param[in]   peer      The peer object to check.
      * @return                True if the peer was marked for disconnection in
      * this function
      */
     bool MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer);
 
     /**
      * Reconsider orphan transactions after a parent has been accepted to the
      * mempool.
      *
      * @param[in] peer     The peer whose orphan transactions we will
      *                     reconsider. Generally only one orphan will be
      *                     reconsidered on each call of this function. If an
      *                     accepted orphan has orphaned children, those will
      *                     need to be reconsidered, creating more work, possibly
      *                     for other peers.
      * @return             True if meaningful work was done (an orphan was
      *                     accepted/rejected).
      *                     If no meaningful work was done, then the work set for
      *                     this peer will be empty.
      */
     bool ProcessOrphanTx(const Config &config, Peer &peer)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
 
     /**
      * Process a single headers message from a peer.
      *
      * @param[in]   pfrom     CNode of the peer
      * @param[in]   peer      The peer sending us the headers
      * @param[in]   headers   The headers received. Note that this may be
      *                        modified within ProcessHeadersMessage.
      * @param[in]   via_compact_block   Whether this header came in via compact
      *                                  block handling.
      */
     void ProcessHeadersMessage(const Config &config, CNode &pfrom, Peer &peer,
                                std::vector<CBlockHeader> &&headers,
                                bool via_compact_block)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_headers_presync_mutex,
                                  g_msgproc_mutex);
 
     // Various helpers for headers processing, invoked by
     // ProcessHeadersMessage()
     /**
      * Return true if headers are continuous and have valid proof-of-work
      * (DoS points assigned on failure)
      */
     bool CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
                          const Consensus::Params &consensusParams, Peer &peer);
     /** Calculate an anti-DoS work threshold for headers chains */
     arith_uint256 GetAntiDoSWorkThreshold();
     /**
      * Deal with state tracking and headers sync for peers that send the
      * occasional non-connecting header (this can happen due to BIP 130 headers
      * announcements for blocks interacting with the 2hr
      * (MAX_FUTURE_BLOCK_TIME) rule).
      */
     void HandleFewUnconnectingHeaders(CNode &pfrom, Peer &peer,
                                       const std::vector<CBlockHeader> &headers)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
     /** Return true if the headers connect to each other, false otherwise */
     bool
     CheckHeadersAreContinuous(const std::vector<CBlockHeader> &headers) const;
     /**
      * Try to continue a low-work headers sync that has already begun.
      * Assumes the caller has already verified the headers connect, and has
      * checked that each header satisfies the proof-of-work target included in
      * the header.
      *  @param[in]  peer                            The peer we're syncing with.
      *  @param[in]  pfrom                           CNode of the peer
      *  @param[in,out] headers                      The headers to be processed.
      *  @return     True if the passed-in headers were successfully processed
      *              as the continuation of a low-work headers sync in
      *              progress; false otherwise.
      *              If false, the passed-in headers will be returned to the
      *              caller.
      *              If true, the returned headers may be empty, indicating
      *              there is no more work for the caller to do; or the headers
      *              may be populated with entries that have passed anti-DoS
      *              checks (and therefore may be validated for block index
      *              acceptance by the caller).
      */
     bool IsContinuationOfLowWorkHeadersSync(Peer &peer, CNode &pfrom,
                                             std::vector<CBlockHeader> &headers)
         EXCLUSIVE_LOCKS_REQUIRED(peer.m_headers_sync_mutex,
                                  !m_headers_presync_mutex, g_msgproc_mutex);
     /**
      * Check work on a headers chain to be processed, and if insufficient,
      * initiate our anti-DoS headers sync mechanism.
      *
      * @param[in]   peer                The peer whose headers we're processing.
      * @param[in]   pfrom               CNode of the peer
      * @param[in]   chain_start_header  Where these headers connect in our
      *                                  index.
      * @param[in,out]   headers             The headers to be processed.
      *
      * @return      True if chain was low work and a headers sync was
      *              initiated (and headers will be empty after calling); false
      *              otherwise.
      */
     bool TryLowWorkHeadersSync(Peer &peer, CNode &pfrom,
                                const CBlockIndex *chain_start_header,
                                std::vector<CBlockHeader> &headers)
         EXCLUSIVE_LOCKS_REQUIRED(!peer.m_headers_sync_mutex, !m_peer_mutex,
                                  !m_headers_presync_mutex, g_msgproc_mutex);
 
     /**
      * Return true if the given header is an ancestor of
      * m_chainman.m_best_header or our current tip
      */
     bool IsAncestorOfBestHeaderOrTip(const CBlockIndex *header)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Request further headers from this peer with a given locator.
      * We don't issue a getheaders message if we have a recent one outstanding.
      * This returns true if a getheaders is actually sent, and false otherwise.
      */
     bool MaybeSendGetHeaders(CNode &pfrom, const CBlockLocator &locator,
                              Peer &peer)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
     /**
      * Potentially fetch blocks from this peer upon receipt of new headers tip
      */
     void HeadersDirectFetchBlocks(const Config &config, CNode &pfrom,
                                   const CBlockIndex *pindexLast);
     /** Update peer state based on received headers message */
     void UpdatePeerStateForReceivedHeaders(CNode &pfrom, Peer &peer,
                                            const CBlockIndex *pindexLast,
                                            bool received_new_header,
                                            bool may_have_more_headers)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 
     void SendBlockTransactions(CNode &pfrom, Peer &peer, const CBlock &block,
                                const BlockTransactionsRequest &req);
 
     /**
      * Register with InvRequestTracker that a TX INV has been received from a
      * peer. The announcement parameters are decided in PeerManager and then
      * passed to InvRequestTracker.
      */
     void AddTxAnnouncement(const CNode &node, const TxId &txid,
                            std::chrono::microseconds current_time)
         EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
 
     /**
      * Register with InvRequestTracker that a PROOF INV has been received from a
      * peer. The announcement parameters are decided in PeerManager and then
      * passed to InvRequestTracker.
      */
     void
     AddProofAnnouncement(const CNode &node, const avalanche::ProofId &proofid,
                          std::chrono::microseconds current_time, bool preferred)
         EXCLUSIVE_LOCKS_REQUIRED(cs_proofrequest);
 
     /** Send a version message to a peer */
     void PushNodeVersion(const Config &config, CNode &pnode, const Peer &peer);
 
     /**
      * Send a ping message every PING_INTERVAL or if requested via RPC. May mark
      * the peer to be disconnected if a ping has timed out.
      * We use mockable time for ping timeouts, so setmocktime may cause pings
      * to time out.
      */
     void MaybeSendPing(CNode &node_to, Peer &peer,
                        std::chrono::microseconds now);
 
     /** Send `addr` messages on a regular schedule. */
     void MaybeSendAddr(CNode &node, Peer &peer,
                        std::chrono::microseconds current_time)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 
     /**
      * Send a single `sendheaders` message, after we have completed headers
      * sync with a peer.
      */
     void MaybeSendSendHeaders(CNode &node, Peer &peer)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 
     /** Send `feefilter` message. */
     void MaybeSendFeefilter(CNode &node, Peer &peer,
                             std::chrono::microseconds current_time)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 
     /**
      * Relay (gossip) an address to a few randomly chosen nodes.
      *
      * @param[in] originator   The id of the peer that sent us the address. We
      *                         don't want to relay it back.
      * @param[in] addr         Address to relay.
      * @param[in] fReachable   Whether the address' network is reachable. We
      *                         relay unreachable addresses less.
      */
     void RelayAddress(NodeId originator, const CAddress &addr, bool fReachable)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, g_msgproc_mutex);
 
     const CChainParams &m_chainparams;
     CConnman &m_connman;
     AddrMan &m_addrman;
     /**
      * Pointer to this node's banman. May be nullptr - check existence before
      * dereferencing.
      */
     BanMan *const m_banman;
     ChainstateManager &m_chainman;
     CTxMemPool &m_mempool;
+    avalanche::Processor *const m_avalanche;
     InvRequestTracker<TxId> m_txrequest GUARDED_BY(::cs_main);
 
     Mutex cs_proofrequest;
     InvRequestTracker<avalanche::ProofId>
         m_proofrequest GUARDED_BY(cs_proofrequest);
 
     /** The height of the best chain */
     std::atomic<int> m_best_height{-1};
 
     /** Next time to check for stale tip */
     std::chrono::seconds m_stale_tip_check_time{0s};
 
     /** Whether this node is running in blocks only mode */
     const bool m_ignore_incoming_txs;
 
     /**
      * Whether we've completed initial sync yet, for determining when to turn
      * on extra block-relay-only peers.
      */
     bool m_initial_sync_finished{false};
 
     /**
      * Protects m_peer_map. This mutex must not be locked while holding a lock
      * on any of the mutexes inside a Peer object.
      */
     mutable Mutex m_peer_mutex;
     /**
      * Map of all Peer objects, keyed by peer id. This map is protected
      * by the m_peer_mutex. Once a shared pointer reference is
      * taken, the lock may be released. Individual fields are protected by
      * their own locks.
      */
     std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
 
     /** Map maintaining per-node state. */
     std::map<NodeId, CNodeState> m_node_states GUARDED_BY(cs_main);
 
     /**
      * Get a pointer to a const CNodeState, used when not mutating the
      * CNodeState object.
      */
     const CNodeState *State(NodeId pnode) const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /** Get a pointer to a mutable CNodeState. */
     CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
 
     /** Number of nodes with fSyncStarted. */
     int nSyncStarted GUARDED_BY(cs_main) = 0;
 
     /** Hash of the last block we received via INV */
     BlockHash
         m_last_block_inv_triggering_headers_sync GUARDED_BY(g_msgproc_mutex){};
 
     /**
      * Sources of received blocks, saved to be able to punish them when
      * processing happens afterwards.
      * Set mapBlockSource[hash].second to false if the node should not be
      * punished if the block is invalid.
      */
     std::map<BlockHash, std::pair<NodeId, bool>>
         mapBlockSource GUARDED_BY(cs_main);
 
     /** Number of outbound peers with m_chain_sync.m_protect. */
     int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0;
 
     /** Number of preferable block download peers. */
     int m_num_preferred_download_peers GUARDED_BY(cs_main){0};
 
     /** Stalling timeout for blocks in IBD */
     std::atomic<std::chrono::seconds> m_block_stalling_timeout{
         BLOCK_STALLING_TIMEOUT_DEFAULT};
 
     bool AlreadyHaveTx(const TxId &txid)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main,
                                  !m_recent_confirmed_transactions_mutex);
 
     /**
      * Filter for transactions that were recently rejected by the mempool.
      * These are not rerequested until the chain tip changes, at which point
      * the entire filter is reset.
      *
      * Without this filter we'd be re-requesting txs from each of our peers,
      * increasing bandwidth consumption considerably. For instance, with 100
      * peers, half of which relay a tx we don't accept, that might be a 50x
      * bandwidth increase. A flooding attacker attempting to roll over the
      * filter using minimum-sized, 60-byte transactions might manage to send
      * 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
      * two minute window to send invs to us.
      *
      * Decreasing the false positive rate is fairly cheap, so we pick one in a
      * million to make it highly unlikely for users to have issues with this
      * filter.
      *
      * Memory used: 1.3 MB
      */
     CRollingBloomFilter m_recent_rejects GUARDED_BY(::cs_main){120'000,
                                                                0.000'001};
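     // Editor's note (compile-time sketch, not in the original patch): the
     // sizing argument above in one line: 120'000 entries at the assumed
     // worst-case 1000 tx/s flood rate leave peers a two minute inv window.
     static_assert(120'000 / 1'000 == 120, "expected a ~2 minute window");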
     uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main);
 
     /**
      * Filter for transactions that have been recently confirmed.
      * We use this to avoid requesting transactions that have already been
      * confirmed.
      */
     mutable Mutex m_recent_confirmed_transactions_mutex;
     CRollingBloomFilter m_recent_confirmed_transactions
         GUARDED_BY(m_recent_confirmed_transactions_mutex){24'000, 0.000'001};
 
     /**
      * For sending `inv`s to inbound peers, we use a single (exponentially
      * distributed) timer for all peers. If we used a separate timer for each
      * peer, a spy node could make multiple inbound connections to us to
      * accurately determine when we received the transaction (and potentially
      * determine the transaction's origin).
      */
     std::chrono::microseconds
     NextInvToInbounds(std::chrono::microseconds now,
                       std::chrono::seconds average_interval);
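 
     // Editor's sketch (illustrative, not part of the original patch): the
     // shared timer above is advanced by an exponentially distributed
     // increment, conceptually
     //   m_next_inv_to_inbounds = now + average_interval * (Exp(1) draw),
     // so every inbound peer observes the same trickle schedule and none can
     // time-correlate our first announcement of a transaction.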
 
     // All of the following cache a recent block, and are protected by
     // m_most_recent_block_mutex
     mutable Mutex m_most_recent_block_mutex;
     std::shared_ptr<const CBlock>
         m_most_recent_block GUARDED_BY(m_most_recent_block_mutex);
     std::shared_ptr<const CBlockHeaderAndShortTxIDs>
         m_most_recent_compact_block GUARDED_BY(m_most_recent_block_mutex);
     BlockHash m_most_recent_block_hash GUARDED_BY(m_most_recent_block_mutex);
 
     // Data about the low-work headers synchronization, aggregated from all
     // peers' HeadersSyncStates.
     /** Mutex guarding the other m_headers_presync_* variables. */
     Mutex m_headers_presync_mutex;
     /**
      * A type to represent statistics about a peer's low-work headers sync.
      *
      * - The first field is the total verified amount of work in that
      *   synchronization.
      * - The second is:
      *   - nullopt: the sync is in REDOWNLOAD phase (phase 2).
      *   - {height, timestamp}: the sync has the specified tip height and block
      *     timestamp (phase 1).
      */
     using HeadersPresyncStats =
         std::pair<arith_uint256, std::optional<std::pair<int64_t, uint32_t>>>;
     /** Statistics for all peers in low-work headers sync. */
     std::map<NodeId, HeadersPresyncStats>
         m_headers_presync_stats GUARDED_BY(m_headers_presync_mutex){};
     /** The peer with the most-work entry in m_headers_presync_stats. */
     NodeId m_headers_presync_bestpeer GUARDED_BY(m_headers_presync_mutex){-1};
     /** The m_headers_presync_stats improved, and needs signalling. */
     std::atomic_bool m_headers_presync_should_signal{false};
 
     /**
      * Height of the highest block announced using BIP 152 high-bandwidth mode.
      */
     int m_highest_fast_announce GUARDED_BY(::cs_main){0};
 
     /** Have we requested this block from a peer */
     bool IsBlockRequested(const BlockHash &hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Remove this block from our tracked requested blocks. Called if:
      *  - the block has been received from a peer
      *  - the request for the block has timed out
      */
     void RemoveBlockRequest(const BlockHash &hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Mark a block as in flight.
      * Returns false, still setting pit, if the block was already in flight
      * from the same peer. pit will only be valid as long as the same cs_main
      * lock is being held.
      */
     bool BlockRequested(const Config &config, NodeId nodeid,
                         const CBlockIndex &block,
                         std::list<QueuedBlock>::iterator **pit = nullptr)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Update pindexLastCommonBlock and add not-in-flight missing successors to
      * vBlocks, until it has at most count entries.
      */
     void FindNextBlocksToDownload(NodeId nodeid, unsigned int count,
                                   std::vector<const CBlockIndex *> &vBlocks,
                                   NodeId &nodeStaller)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     std::map<BlockHash, std::pair<NodeId, std::list<QueuedBlock>::iterator>>
         mapBlocksInFlight GUARDED_BY(cs_main);
 
     /** When our tip was last updated. */
     std::atomic<std::chrono::seconds> m_last_tip_update{0s};
 
     /**
      * Determine whether or not a peer can request a transaction, and return it
      * (or nullptr if not found or not allowed).
      */
     CTransactionRef FindTxForGetData(const Peer &peer, const TxId &txid,
                                      const std::chrono::seconds mempool_req,
                                      const std::chrono::seconds now)
         LOCKS_EXCLUDED(cs_main)
             EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);
 
     void ProcessGetData(const Config &config, CNode &pfrom, Peer &peer,
                         const std::atomic<bool> &interruptMsgProc)
         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex,
                                  peer.m_getdata_requests_mutex,
                                  NetEventsInterface::g_msgproc_mutex)
             LOCKS_EXCLUDED(cs_main);
 
     /** Process a new block. Perform any post-processing housekeeping */
     void ProcessBlock(const Config &config, CNode &node,
                       const std::shared_ptr<const CBlock> &block,
                       bool force_processing, bool min_pow_checked);
 
     /** Relay map. */
     typedef std::map<TxId, CTransactionRef> MapRelay;
     MapRelay mapRelay GUARDED_BY(cs_main);
 
     /**
      * Expiration-time ordered list of (expire time, relay map entry) pairs,
      * protected by cs_main.
      */
     std::deque<std::pair<std::chrono::microseconds, MapRelay::iterator>>
         g_relay_expiration GUARDED_BY(cs_main);
 
     /**
      * When a peer sends us a valid block, instruct it to announce blocks to us
      * using CMPCTBLOCK if possible by adding its nodeid to the end of
      * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size
      * by removing the first element if necessary.
      */
     void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /** Stack of nodes which we have set to announce using compact blocks */
     std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main);
 
     /** Number of peers from which we're downloading blocks. */
     int m_peers_downloading_from GUARDED_BY(cs_main) = 0;
 
     /** Storage for orphan information */
     TxOrphanage m_orphanage;
 
     void AddToCompactExtraTransactions(const CTransactionRef &tx)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 
     /**
      * Orphan/conflicted/etc transactions that are kept for compact block
      * reconstruction.
      * The last
      * -blockreconstructionextratxn/DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN of
      * these are kept in a ring buffer.
      */
     std::vector<std::pair<TxHash, CTransactionRef>>
         vExtraTxnForCompact GUARDED_BY(g_msgproc_mutex);
     /** Offset into vExtraTxnForCompact to insert the next tx */
     size_t vExtraTxnForCompactIt GUARDED_BY(g_msgproc_mutex) = 0;
 
     /**
      * Check whether the last unknown block a peer advertised is not yet known.
      */
     void ProcessBlockAvailability(NodeId nodeid)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     /**
      * Update tracking information about which blocks a peer is assumed to have.
      */
     void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool CanDirectFetch() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * To prevent fingerprinting attacks, only send blocks/headers outside of
      * the active chain if they are no more than a month older (both in time,
      * and in best equivalent proof of work) than the best header chain we know
      * about and we fully-validated them at some point.
      */
     bool BlockRequestAllowed(const CBlockIndex *pindex)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool AlreadyHaveBlock(const BlockHash &block_hash)
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
     bool AlreadyHaveProof(const avalanche::ProofId &proofid);
     void ProcessGetBlockData(const Config &config, CNode &pfrom, Peer &peer,
                              const CInv &inv)
         EXCLUSIVE_LOCKS_REQUIRED(!m_most_recent_block_mutex);
 
     /**
      * Validation logic for compact filters request handling.
      *
      * May disconnect from the peer in the case of a bad request.
      *
      * @param[in]   node            The node that we received the request from
      * @param[in]   peer            The peer that we received the request from
      * @param[in]   filter_type     The filter type the request is for. Must be
      *                              basic filters.
      * @param[in]   start_height    The start height for the request
      * @param[in]   stop_hash       The stop_hash for the request
      * @param[in]   max_height_diff The maximum number of items permitted to
      *                              request, as specified in BIP 157
      * @param[out]  stop_index      The CBlockIndex for the stop_hash block, if
      *                              the request can be serviced.
      * @param[out]  filter_index    The filter index, if the request can be
      *                              serviced.
      * @return                      True if the request can be serviced.
      */
     bool PrepareBlockFilterRequest(CNode &node, Peer &peer,
                                    BlockFilterType filter_type,
                                    uint32_t start_height,
                                    const BlockHash &stop_hash,
                                    uint32_t max_height_diff,
                                    const CBlockIndex *&stop_index,
                                    BlockFilterIndex *&filter_index);
 
     /**
      * Handle a cfilters request.
      *
      * May disconnect from the peer in the case of a bad request.
      *
      * @param[in]   node            The node that we received the request from
      * @param[in]   peer            The peer that we received the request from
      * @param[in]   vRecv           The raw message received
      */
     void ProcessGetCFilters(CNode &node, Peer &peer, CDataStream &vRecv);
     /**
      * Handle a cfheaders request.
      *
      * May disconnect from the peer in the case of a bad request.
      *
      * @param[in]   node            The node that we received the request from
      * @param[in]   peer            The peer that we received the request from
      * @param[in]   vRecv           The raw message received
      */
     void ProcessGetCFHeaders(CNode &node, Peer &peer, CDataStream &vRecv);
     /**
      * Handle a getcfcheckpt request.
      *
      * May disconnect from the peer in the case of a bad request.
      *
      * @param[in]   node            The node that we received the request from
      * @param[in]   peer            The peer that we received the request from
      * @param[in]   vRecv           The raw message received
      */
     void ProcessGetCFCheckPt(CNode &node, Peer &peer, CDataStream &vRecv);
 
     /**
      * Decide a response for an Avalanche poll about the given block.
      *
      * @param[in]   hash            The hash of the block being polled for
      * @return                      Our current vote for the block
      */
     uint32_t GetAvalancheVoteForBlock(const BlockHash &hash) const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main);
 
     /**
      * Decide a response for an Avalanche poll about the given transaction.
      *
      * @param[in] id       The id of the transaction being polled for
      * @return             Our current vote for the transaction
      */
     uint32_t GetAvalancheVoteForTx(const TxId &id) const
         EXCLUSIVE_LOCKS_REQUIRED(cs_main,
                                  !m_recent_confirmed_transactions_mutex);
 
     /**
      * Checks if address relay is permitted with peer. If needed, initializes
      * the m_addr_known bloom filter and sets m_addr_relay_enabled to true.
      *
      *  @return   True if address relay is enabled with peer
      *            False if address relay is disallowed
      */
     bool SetupAddressRelay(const CNode &node, Peer &peer)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 
     void AddAddressKnown(Peer &peer, const CAddress &addr)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
     void PushAddress(Peer &peer, const CAddress &addr,
                      FastRandomContext &insecure_rand)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
 
     /**
      * Manage reception of an avalanche proof.
      *
      * @return   False if the peer is misbehaving, true otherwise
      */
     bool ReceivedAvalancheProof(CNode &node, Peer &peer,
                                 const avalanche::ProofRef &proof)
         EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !cs_proofrequest);
 
     avalanche::ProofRef FindProofForGetData(const Peer &peer,
                                             const avalanche::ProofId &proofid,
                                             const std::chrono::seconds now)
         EXCLUSIVE_LOCKS_REQUIRED(NetEventsInterface::g_msgproc_mutex);
 
     bool isPreferredDownloadPeer(const CNode &pfrom);
 };
 
 const CNodeState *PeerManagerImpl::State(NodeId pnode) const
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
     if (it == m_node_states.end()) {
         return nullptr;
     }
 
     return &it->second;
 }
 
 CNodeState *PeerManagerImpl::State(NodeId pnode)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     return const_cast<CNodeState *>(std::as_const(*this).State(pnode));
 }
 
 /**
  * Whether the peer supports the address. For example, a peer that does not
  * implement BIP155 cannot receive Tor v3 addresses, because those require
  * ADDRv2 (BIP155) encoding.
  */
 static bool IsAddrCompatible(const Peer &peer, const CAddress &addr) {
     return peer.m_wants_addrv2 || addr.IsAddrV1Compatible();
 }
 
 void PeerManagerImpl::AddAddressKnown(Peer &peer, const CAddress &addr) {
     assert(peer.m_addr_known);
     peer.m_addr_known->insert(addr.GetKey());
 }
 
 void PeerManagerImpl::PushAddress(Peer &peer, const CAddress &addr,
                                   FastRandomContext &insecure_rand) {
     // Checking the known filter here only saves space by avoiding duplicates.
     // Before sending, we'll filter again for addresses that became known
     // after they were pushed.
     assert(peer.m_addr_known);
     if (addr.IsValid() && !peer.m_addr_known->contains(addr.GetKey()) &&
         IsAddrCompatible(peer, addr)) {
         if (peer.m_addrs_to_send.size() >= GetMaxAddrToSend()) {
             peer.m_addrs_to_send[insecure_rand.randrange(
                 peer.m_addrs_to_send.size())] = addr;
         } else {
             peer.m_addrs_to_send.push_back(addr);
         }
     }
 }
 
 static void AddKnownTx(Peer &peer, const TxId &txid) {
     auto tx_relay = peer.GetTxRelay();
     if (!tx_relay) {
         return;
     }
 
     LOCK(tx_relay->m_tx_inventory_mutex);
     tx_relay->m_tx_inventory_known_filter.insert(txid);
 }
 
 static void AddKnownProof(Peer &peer, const avalanche::ProofId &proofid) {
     if (peer.m_proof_relay != nullptr) {
         LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
         peer.m_proof_relay->m_proof_inventory_known_filter.insert(proofid);
     }
 }
 
 bool PeerManagerImpl::isPreferredDownloadPeer(const CNode &pfrom) {
     LOCK(cs_main);
     const CNodeState *state = State(pfrom.GetId());
     return state && state->fPreferredDownload;
 }
 
 /** Whether this peer can serve us blocks. */
 static bool CanServeBlocks(const Peer &peer) {
     return peer.m_their_services & (NODE_NETWORK | NODE_NETWORK_LIMITED);
 }
 
 /**
  * Whether this peer can only serve limited recent blocks (e.g. because
  * it prunes old blocks)
  */
 static bool IsLimitedPeer(const Peer &peer) {
     return (!(peer.m_their_services & NODE_NETWORK) &&
             (peer.m_their_services & NODE_NETWORK_LIMITED));
 }
 
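 /**
  * Return the time at which the next inv message should be sent to inbound
  * peers, drawing a fresh exponentially distributed delay with the given
  * average interval whenever the previous deadline has passed.
  */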
 std::chrono::microseconds
 PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
                                    std::chrono::seconds average_interval) {
     if (m_next_inv_to_inbounds.load() < now) {
         // If this function were called from multiple threads simultaneously,
         // it would be possible that both update the next send variable and
         // return a different result to their caller. This is not possible in
         // practice, as only the net processing thread invokes this function.
         m_next_inv_to_inbounds = GetExponentialRand(now, average_interval);
     }
     return m_next_inv_to_inbounds;
 }
 
 bool PeerManagerImpl::IsBlockRequested(const BlockHash &hash) {
     return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end();
 }
 
 void PeerManagerImpl::RemoveBlockRequest(const BlockHash &hash) {
     auto it = mapBlocksInFlight.find(hash);
 
     if (it == mapBlocksInFlight.end()) {
         // Block was not requested
         return;
     }
 
     auto [node_id, list_it] = it->second;
     CNodeState *state = State(node_id);
     assert(state != nullptr);
 
     if (state->vBlocksInFlight.begin() == list_it) {
         // First block on the queue was received, update the start download time
         // for the next one
         state->m_downloading_since = std::max(
             state->m_downloading_since, GetTime<std::chrono::microseconds>());
     }
     state->vBlocksInFlight.erase(list_it);
 
     state->nBlocksInFlight--;
     if (state->nBlocksInFlight == 0) {
         // Last validated block on the queue was received.
         m_peers_downloading_from--;
     }
     state->m_stalling_since = 0us;
     mapBlocksInFlight.erase(it);
 }
 
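 /**
  * Mark a block as in flight for the given peer.
  *
  * Returns false without modifying any state if the block was already
  * requested from this same peer. If pit is non-null, it receives a pointer
  * to the in-flight entry's queue iterator so the caller can attach a
  * PartiallyDownloadedBlock for compact block reconstruction.
  */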
 bool PeerManagerImpl::BlockRequested(const Config &config, NodeId nodeid,
                                      const CBlockIndex &block,
                                      std::list<QueuedBlock>::iterator **pit) {
     const BlockHash &hash{block.GetBlockHash()};
 
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Short-circuit most stuff in case it is from the same node.
     std::map<BlockHash,
              std::pair<NodeId, std::list<QueuedBlock>::iterator>>::iterator
         itInFlight = mapBlocksInFlight.find(hash);
     if (itInFlight != mapBlocksInFlight.end() &&
         itInFlight->second.first == nodeid) {
         if (pit) {
             *pit = &itInFlight->second.second;
         }
         return false;
     }
 
     // Make sure it's not listed somewhere already.
     RemoveBlockRequest(hash);
 
     std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(
         state->vBlocksInFlight.end(),
         {&block, std::unique_ptr<PartiallyDownloadedBlock>(
                      pit ? new PartiallyDownloadedBlock(config, &m_mempool)
                          : nullptr)});
     state->nBlocksInFlight++;
     if (state->nBlocksInFlight == 1) {
         // We're starting a block download (batch) from this peer.
         state->m_downloading_since = GetTime<std::chrono::microseconds>();
         m_peers_downloading_from++;
     }
 
     itInFlight = mapBlocksInFlight
                      .insert(std::make_pair(hash, std::make_pair(nodeid, it)))
                      .first;
 
     if (pit) {
         *pit = &itInFlight->second.second;
     }
 
     return true;
 }
 
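 /**
  * Maybe request high-bandwidth compact block announcements (BIP152) from
  * this peer, dropping the oldest announcer once we already have three and
  * taking care not to evict our last outbound high-bandwidth peer.
  */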
 void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) {
     AssertLockHeld(cs_main);
 
     // Never request high-bandwidth mode from peers if we're blocks-only. Our
     // mempool will not contain the transactions necessary to reconstruct the
     // compact block.
     if (m_ignore_incoming_txs) {
         return;
     }
 
     CNodeState *nodestate = State(nodeid);
     if (!nodestate) {
         LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid);
         return;
     }
     if (!nodestate->m_provides_cmpctblocks) {
         return;
     }
     int num_outbound_hb_peers = 0;
     for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin();
          it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
         if (*it == nodeid) {
             lNodesAnnouncingHeaderAndIDs.erase(it);
             lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
             return;
         }
         CNodeState *state = State(*it);
         if (state != nullptr && !state->m_is_inbound) {
             ++num_outbound_hb_peers;
         }
     }
     if (nodestate->m_is_inbound) {
         // If we're adding an inbound HB peer, make sure we're not removing
         // our last outbound HB peer in the process.
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3 &&
             num_outbound_hb_peers == 1) {
             CNodeState *remove_node =
                 State(lNodesAnnouncingHeaderAndIDs.front());
             if (remove_node != nullptr && !remove_node->m_is_inbound) {
                 // Put the HB outbound peer in the second slot, so that it
                 // doesn't get removed.
                 std::swap(lNodesAnnouncingHeaderAndIDs.front(),
                           *std::next(lNodesAnnouncingHeaderAndIDs.begin()));
             }
         }
     }
     m_connman.ForNode(nodeid, [this](CNode *pfrom) EXCLUSIVE_LOCKS_REQUIRED(
                                   ::cs_main) {
         AssertLockHeld(::cs_main);
         if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
             // As per BIP152, we only get 3 of our peers to announce
             // blocks using compact encodings.
             m_connman.ForNode(
                 lNodesAnnouncingHeaderAndIDs.front(), [this](CNode *pnodeStop) {
                     m_connman.PushMessage(
                         pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion())
                                        .Make(NetMsgType::SENDCMPCT,
                                              /*high_bandwidth=*/false,
                                              /*version=*/CMPCTBLOCKS_VERSION));
                     // save BIP152 bandwidth state: we select peer to be
                     // low-bandwidth
                     pnodeStop->m_bip152_highbandwidth_to = false;
                     return true;
                 });
             lNodesAnnouncingHeaderAndIDs.pop_front();
         }
         m_connman.PushMessage(pfrom,
                               CNetMsgMaker(pfrom->GetCommonVersion())
                                   .Make(NetMsgType::SENDCMPCT,
                                         /*high_bandwidth=*/true,
                                         /*version=*/CMPCTBLOCKS_VERSION));
         // save BIP152 bandwidth state: we select peer to be high-bandwidth
         pfrom->m_bip152_highbandwidth_to = true;
         lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
         return true;
     });
 }
 
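 /**
  * Consider the tip potentially stale if it has not been updated for longer
  * than three times the target block spacing while no blocks are in flight.
  */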
 bool PeerManagerImpl::TipMayBeStale() {
     AssertLockHeld(cs_main);
     const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
     if (m_last_tip_update.load() == 0s) {
         m_last_tip_update = GetTime<std::chrono::seconds>();
     }
     return m_last_tip_update.load() <
                GetTime<std::chrono::seconds>() -
                    std::chrono::seconds{consensusParams.nPowTargetSpacing *
                                         3} &&
            mapBlocksInFlight.empty();
 }
 
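 /**
  * Whether announced blocks are recent enough to fetch directly: true when
  * our tip is within 20 target block spacings of the adjusted time.
  */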
 bool PeerManagerImpl::CanDirectFetch() {
     return m_chainman.ActiveChain().Tip()->Time() >
            GetAdjustedTime() -
                m_chainparams.GetConsensus().PowTargetSpacing() * 20;
 }
 
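 /**
  * Whether the peer is assumed to have this block header, either as an
  * ancestor of its best known block or of the best header we sent it.
  */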
 static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
     EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
     if (state->pindexBestKnownBlock &&
         pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) {
         return true;
     }
     if (state->pindexBestHeaderSent &&
         pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) {
         return true;
     }
     return false;
 }
 
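 /**
  * Check whether the last unknown block hash a peer advertised is now known;
  * if so, and it has at least as much work as the current candidate, promote
  * it to pindexBestKnownBlock.
  */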
 void PeerManagerImpl::ProcessBlockAvailability(NodeId nodeid) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     if (!state->hashLastUnknownBlock.IsNull()) {
         const CBlockIndex *pindex =
             m_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock);
         if (pindex && pindex->nChainWork > 0) {
             if (state->pindexBestKnownBlock == nullptr ||
                 pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
                 state->pindexBestKnownBlock = pindex;
             }
             state->hashLastUnknownBlock.SetNull();
         }
     }
 }
 
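 /**
  * Update tracking information about which blocks a peer is assumed to have.
  */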
 void PeerManagerImpl::UpdateBlockAvailability(NodeId nodeid,
                                               const BlockHash &hash) {
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     ProcessBlockAvailability(nodeid);
 
     const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
     if (pindex && pindex->nChainWork > 0) {
         // An actually better block was announced.
         if (state->pindexBestKnownBlock == nullptr ||
             pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) {
             state->pindexBestKnownBlock = pindex;
         }
     } else {
         // An unknown block was announced; just assume that the latest one is
         // the best one.
         state->hashLastUnknownBlock = hash;
     }
 }
 
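 /**
  * Update pindexLastCommonBlock and add not-in-flight missing successors of
  * it to vBlocks, until the vector has at most count entries.
  */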
 void PeerManagerImpl::FindNextBlocksToDownload(
     NodeId nodeid, unsigned int count,
     std::vector<const CBlockIndex *> &vBlocks, NodeId &nodeStaller) {
     if (count == 0) {
         return;
     }
 
     vBlocks.reserve(vBlocks.size() + count);
     CNodeState *state = State(nodeid);
     assert(state != nullptr);
 
     // Make sure pindexBestKnownBlock is up to date, we'll need it.
     ProcessBlockAvailability(nodeid);
 
     if (state->pindexBestKnownBlock == nullptr ||
         state->pindexBestKnownBlock->nChainWork <
             m_chainman.ActiveChain().Tip()->nChainWork ||
         state->pindexBestKnownBlock->nChainWork <
             m_chainman.MinimumChainWork()) {
         // This peer has nothing interesting.
         return;
     }
 
     if (state->pindexLastCommonBlock == nullptr) {
         // Bootstrap quickly by guessing a parent of our best tip is the forking
         // point. Guessing wrong in either direction is not a problem.
         state->pindexLastCommonBlock =
             m_chainman
                 .ActiveChain()[std::min(state->pindexBestKnownBlock->nHeight,
                                         m_chainman.ActiveChain().Height())];
     }
 
     // If the peer reorganized, our previous pindexLastCommonBlock may not be an
     // ancestor of its current tip anymore. Go back enough to fix that.
     state->pindexLastCommonBlock = LastCommonAncestor(
         state->pindexLastCommonBlock, state->pindexBestKnownBlock);
     if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) {
         return;
     }
 
     std::vector<const CBlockIndex *> vToFetch;
     const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
     // Never fetch further than the best block we know the peer has, or more
     // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in
     // common with this peer. The +1 is so we can detect stalling, namely if we
     // would be able to download that next block if the window were 1 larger.
     int nWindowEnd =
         state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
     int nMaxHeight =
         std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
     NodeId waitingfor = -1;
     while (pindexWalk->nHeight < nMaxHeight) {
         // Read up to 128 (or more, if more blocks than that are needed)
         // successors of pindexWalk (towards pindexBestKnownBlock) into
         // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as
         // expensive as iterating over ~100 CBlockIndex* entries anyway.
         int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight,
                                 std::max<int>(count - vBlocks.size(), 128));
         vToFetch.resize(nToFetch);
         pindexWalk = state->pindexBestKnownBlock->GetAncestor(
             pindexWalk->nHeight + nToFetch);
         vToFetch[nToFetch - 1] = pindexWalk;
         for (unsigned int i = nToFetch - 1; i > 0; i--) {
             vToFetch[i - 1] = vToFetch[i]->pprev;
         }
 
         // Iterate over those blocks in vToFetch (in forward direction), adding
         // the ones that are not yet downloaded and not in flight to vBlocks. In
         // the meantime, update pindexLastCommonBlock as long as all ancestors
         // are already downloaded, or if the block is already part of our
         // chain (and therefore we don't need to fetch it even if it gets
         // pruned).
         for (const CBlockIndex *pindex : vToFetch) {
             if (!pindex->IsValid(BlockValidity::TREE)) {
                 // We consider the chain that this peer is on invalid.
                 return;
             }
             if (pindex->nStatus.hasData() ||
                 m_chainman.ActiveChain().Contains(pindex)) {
                 if (pindex->HaveTxsDownloaded()) {
                     state->pindexLastCommonBlock = pindex;
                 }
             } else if (!IsBlockRequested(pindex->GetBlockHash())) {
                 // The block is not already downloaded, and not yet in flight.
                 if (pindex->nHeight > nWindowEnd) {
                     // We reached the end of the window.
                     if (vBlocks.size() == 0 && waitingfor != nodeid) {
                         // We aren't able to fetch anything, but we would be if
                         // the download window was one larger.
                         nodeStaller = waitingfor;
                     }
                     return;
                 }
                 vBlocks.push_back(pindex);
                 if (vBlocks.size() == count) {
                     return;
                 }
             } else if (waitingfor == -1) {
                 // This is the first already-in-flight block.
                 waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
             }
         }
     }
 }
 
 } // namespace
 
 template <class InvId>
 static bool TooManyAnnouncements(const CNode &node,
                                  const InvRequestTracker<InvId> &requestTracker,
                                  const DataRequestParameters &requestParams) {
     return !node.HasPermission(
                requestParams.bypass_request_limits_permissions) &&
            requestTracker.Count(node.GetId()) >=
                requestParams.max_peer_announcements;
 }
 
 /**
  * Compute the request time for this announcement, current time plus delays for:
  *   - nonpref_peer_delay for announcements from non-preferred connections
  *   - overloaded_peer_delay for announcements from peers which have at least
  *     max_peer_request_in_flight requests in flight (and don't have
  *     NetPermissionFlags::Relay).
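  *
  * For example, a non-preferred announcement from an overloaded peer without
  * the bypass permission is scheduled at current_time + nonpref_peer_delay +
  * overloaded_peer_delay, while a preferred, non-overloaded peer gets no
  * added delay.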
  */
 template <class InvId>
 static std::chrono::microseconds
 ComputeRequestTime(const CNode &node,
                    const InvRequestTracker<InvId> &requestTracker,
                    const DataRequestParameters &requestParams,
                    std::chrono::microseconds current_time, bool preferred) {
     auto delay = std::chrono::microseconds{0};
 
     if (!preferred) {
         delay += requestParams.nonpref_peer_delay;
     }
 
     if (!node.HasPermission(requestParams.bypass_request_limits_permissions) &&
         requestTracker.CountInFlight(node.GetId()) >=
             requestParams.max_peer_request_in_flight) {
         delay += requestParams.overloaded_peer_delay;
     }
 
     return current_time + delay;
 }
 
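 /** Send our version message to the given peer. */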
 void PeerManagerImpl::PushNodeVersion(const Config &config, CNode &pnode,
                                       const Peer &peer) {
     uint64_t my_services{peer.m_our_services};
     const int64_t nTime{count_seconds(GetTime<std::chrono::seconds>())};
     uint64_t nonce = pnode.GetLocalNonce();
     const int nNodeStartingHeight{m_best_height};
     NodeId nodeid = pnode.GetId();
     CAddress addr = pnode.addr;
     uint64_t extraEntropy = pnode.GetLocalExtraEntropy();
 
     CService addr_you =
         addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible()
             ? addr
             : CService();
     uint64_t your_services{addr.nServices};
 
     const bool tx_relay = !m_ignore_incoming_txs && !pnode.IsBlockOnlyConn() &&
                           !pnode.IsFeelerConn();
     m_connman.PushMessage(
         // your_services, addr_you: Together the pre-version-31402 serialization
         //     of CAddress "addrYou" (without nTime)
         // my_services, CService(): Together the pre-version-31402 serialization
         //     of CAddress "addrMe" (without nTime)
         &pnode, CNetMsgMaker(INIT_PROTO_VERSION)
                     .Make(NetMsgType::VERSION, PROTOCOL_VERSION, my_services,
                           nTime, your_services, addr_you, my_services,
                           CService(), nonce, userAgent(config),
                           nNodeStartingHeight, tx_relay, extraEntropy));
 
     if (fLogIPs) {
         LogPrint(BCLog::NET,
                  "send version message: version %d, blocks=%d, them=%s, "
                  "txrelay=%d, peer=%d\n",
                  PROTOCOL_VERSION, nNodeStartingHeight, addr_you.ToString(),
                  tx_relay, nodeid);
     } else {
         LogPrint(BCLog::NET,
                  "send version message: version %d, blocks=%d, "
                  "txrelay=%d, peer=%d\n",
                  PROTOCOL_VERSION, nNodeStartingHeight, tx_relay, nodeid);
     }
 }
 
 void PeerManagerImpl::AddTxAnnouncement(
     const CNode &node, const TxId &txid,
     std::chrono::microseconds current_time) {
     // For m_txrequest and state
     AssertLockHeld(::cs_main);
 
     if (TooManyAnnouncements(node, m_txrequest, TX_REQUEST_PARAMS)) {
         return;
     }
 
     const bool preferred = isPreferredDownloadPeer(node);
     auto reqtime = ComputeRequestTime(node, m_txrequest, TX_REQUEST_PARAMS,
                                       current_time, preferred);
 
     m_txrequest.ReceivedInv(node.GetId(), txid, preferred, reqtime);
 }
 
 void PeerManagerImpl::AddProofAnnouncement(
     const CNode &node, const avalanche::ProofId &proofid,
     std::chrono::microseconds current_time, bool preferred) {
     // For m_proofrequest
     AssertLockHeld(cs_proofrequest);
 
     if (TooManyAnnouncements(node, m_proofrequest, PROOF_REQUEST_PARAMS)) {
         return;
     }
 
     auto reqtime = ComputeRequestTime(
         node, m_proofrequest, PROOF_REQUEST_PARAMS, current_time, preferred);
 
     m_proofrequest.ReceivedInv(node.GetId(), proofid, preferred, reqtime);
 }
 
 void PeerManagerImpl::UpdateLastBlockAnnounceTime(NodeId node,
                                                   int64_t time_in_seconds) {
     LOCK(cs_main);
     CNodeState *state = State(node);
     if (state) {
         state->m_last_block_announcement = time_in_seconds;
     }
 }
 
 void PeerManagerImpl::InitializeNode(const Config &config, CNode &node,
                                      ServiceFlags our_services) {
     NodeId nodeid = node.GetId();
     {
         LOCK(cs_main);
         m_node_states.emplace_hint(m_node_states.end(),
                                    std::piecewise_construct,
                                    std::forward_as_tuple(nodeid),
                                    std::forward_as_tuple(node.IsInboundConn()));
         assert(m_txrequest.Count(nodeid) == 0);
     }
     PeerRef peer = std::make_shared<Peer>(nodeid, our_services);
     {
         LOCK(m_peer_mutex);
         m_peer_map.emplace_hint(m_peer_map.end(), nodeid, peer);
     }
     if (!node.IsInboundConn()) {
         PushNodeVersion(config, node, *peer);
     }
 }
 
 void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler &scheduler) {
     std::set<TxId> unbroadcast_txids = m_mempool.GetUnbroadcastTxs();
 
     for (const TxId &txid : unbroadcast_txids) {
         // Sanity check: all unbroadcast txns should exist in the mempool
         if (m_mempool.exists(txid)) {
             RelayTransaction(txid);
         } else {
             m_mempool.RemoveUnbroadcastTx(txid, true);
         }
     }
 
-    if (g_avalanche && isAvalancheEnabled(gArgs)) {
+    if (m_avalanche && isAvalancheEnabled(gArgs)) {
         // Get and sanitize the list of proofids to broadcast. The RelayProof
         // call is done in a second loop to avoid locking cs_vNodes while
         // cs_peerManager is locked, which would cause a potential deadlock
         // due to reversed lock order.
         auto unbroadcasted_proofids =
-            g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
+            m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
                 auto unbroadcasted_proofids = pm.getUnbroadcastProofs();
 
                 auto it = unbroadcasted_proofids.begin();
                 while (it != unbroadcasted_proofids.end()) {
                     // Sanity check: all unbroadcast proofs should be bound to a
                     // peer in the peermanager
                     if (!pm.isBoundToPeer(*it)) {
                         pm.removeUnbroadcastProof(*it);
                         it = unbroadcasted_proofids.erase(it);
                         continue;
                     }
 
                     ++it;
                 }
 
                 return unbroadcasted_proofids;
             });
 
         // Remaining proofids are the ones to broadcast
         for (const auto &proofid : unbroadcasted_proofids) {
             RelayProof(proofid);
         }
     }
 
     // Schedule next run for 10-15 minutes in the future.
     // We add randomness on every cycle to avoid the possibility of P2P
     // fingerprinting.
     const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
     scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                               reattemptBroadcastInterval);
 }
 
 void PeerManagerImpl::UpdateAvalancheStatistics() const {
     m_connman.ForEachNode([](CNode *pnode) {
         pnode->updateAvailabilityScore(AVALANCHE_STATISTICS_DECAY_FACTOR);
     });
 
-    if (!g_avalanche) {
+    if (!m_avalanche) {
         // Not enabled or not ready yet
         return;
     }
 
     // Generate a peer availability score by computing an exponentially
     // weighted moving average of the average of node availability scores.
     // This ensures the peer score is bound to the lifetime of its proof which
     // incentivizes stable network activity.
-    g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
+    m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
         pm.updateAvailabilityScores(
             AVALANCHE_STATISTICS_DECAY_FACTOR, [&](NodeId nodeid) -> double {
                 double score{0.0};
                 m_connman.ForNode(nodeid, [&](CNode *pavanode) {
                     score = pavanode->getAvailabilityScore();
                     return true;
                 });
                 return score;
             });
     });
 }
 
 void PeerManagerImpl::AvalanchePeriodicNetworking(CScheduler &scheduler) const {
     const auto now = GetTime<std::chrono::seconds>();
     std::vector<NodeId> avanode_ids;
     bool fQuorumEstablished;
     bool fShouldRequestMoreNodes;
 
-    if (!g_avalanche) {
+    if (!m_avalanche) {
         // Not enabled or not ready yet, retry later
         goto scheduleLater;
     }
 
-    g_avalanche->sendDelayedAvahello();
+    m_avalanche->sendDelayedAvahello();
 
-    fQuorumEstablished = g_avalanche->isQuorumEstablished();
+    fQuorumEstablished = m_avalanche->isQuorumEstablished();
     fShouldRequestMoreNodes =
-        g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
+        m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
             return pm.shouldRequestMoreNodes();
         });
 
     m_connman.ForEachNode([&](CNode *pnode) {
         // Build a list of the avalanche peers' node ids
         if (pnode->m_avalanche_enabled) {
             avanode_ids.push_back(pnode->GetId());
         }
 
         PeerRef peer = GetPeerRef(pnode->GetId());
         if (peer == nullptr) {
             return;
         }
         // If a proof radix tree timed out, clean it up
         if (peer->m_proof_relay &&
             now > (peer->m_proof_relay->lastSharedProofsUpdate.load() +
                    AVALANCHE_AVAPROOFS_TIMEOUT)) {
             peer->m_proof_relay->sharedProofs = {};
         }
     });
 
     if (avanode_ids.empty()) {
         // No node is available for messaging, retry later
         goto scheduleLater;
     }
 
     Shuffle(avanode_ids.begin(), avanode_ids.end(), FastRandomContext());
 
     // Request avalanche addresses from our peers
     for (NodeId avanodeId : avanode_ids) {
         const bool sentGetavaaddr =
             m_connman.ForNode(avanodeId, [&](CNode *pavanode) {
                 if (!fQuorumEstablished || !pavanode->IsInboundConn()) {
                     m_connman.PushMessage(
                         pavanode, CNetMsgMaker(pavanode->GetCommonVersion())
                                       .Make(NetMsgType::GETAVAADDR));
                     PeerRef peer = GetPeerRef(avanodeId);
                     WITH_LOCK(peer->m_addr_token_bucket_mutex,
                               peer->m_addr_token_bucket += GetMaxAddrToSend());
                     return true;
                 }
                 return false;
             });
 
         // If we have no reason to believe that we need more nodes, only request
         // addresses from one of our peers.
         if (sentGetavaaddr && fQuorumEstablished && !fShouldRequestMoreNodes) {
             break;
         }
     }
 
     if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
         // Don't request proofs while in IBD. We're likely to orphan them
         // because we don't have the UTXOs.
         goto scheduleLater;
     }
 
     // If we have not received any avaproofs message yet, be kind and only
     // request from a subset of our peers, as we expect a ton of avaproofs
     // messages in the process.
-    if (g_avalanche->getAvaproofsNodeCounter() == 0) {
+    if (m_avalanche->getAvaproofsNodeCounter() == 0) {
         avanode_ids.resize(std::min<size_t>(avanode_ids.size(), 3));
     }
 
     for (NodeId nodeid : avanode_ids) {
         // Send a getavaproofs to all of our peers
         m_connman.ForNode(nodeid, [&](CNode *pavanode) {
             PeerRef peer = GetPeerRef(nodeid);
             if (peer->m_proof_relay) {
                 m_connman.PushMessage(pavanode,
                                       CNetMsgMaker(pavanode->GetCommonVersion())
                                           .Make(NetMsgType::GETAVAPROOFS));
 
                 peer->m_proof_relay->compactproofs_requested = true;
             }
             return true;
         });
     }
 
 scheduleLater:
     // Schedule next run for 2-5 minutes in the future.
     // We add randomness on every cycle to avoid the possibility of P2P
     // fingerprinting.
     const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
     scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
                               avalanchePeriodicNetworkingInterval);
 }
 
 void PeerManagerImpl::FinalizeNode(const Config &config, const CNode &node) {
     NodeId nodeid = node.GetId();
     int misbehavior{0};
     {
         LOCK(cs_main);
         {
             // We remove the PeerRef from m_peer_map here, but we don't always
             // destruct the Peer. Sometimes another thread is still holding a
             // PeerRef, so the refcount is >= 1. Be careful not to do any
             // processing here that assumes Peer won't be changed before it's
             // destructed.
             PeerRef peer = RemovePeer(nodeid);
             assert(peer != nullptr);
             misbehavior = WITH_LOCK(peer->m_misbehavior_mutex,
                                     return peer->m_misbehavior_score);
             LOCK(m_peer_mutex);
             m_peer_map.erase(nodeid);
         }
         CNodeState *state = State(nodeid);
         assert(state != nullptr);
 
         if (state->fSyncStarted) {
             nSyncStarted--;
         }
 
         for (const QueuedBlock &entry : state->vBlocksInFlight) {
             mapBlocksInFlight.erase(entry.pindex->GetBlockHash());
         }
         m_orphanage.EraseForPeer(nodeid);
         m_txrequest.DisconnectedPeer(nodeid);
         m_num_preferred_download_peers -= state->fPreferredDownload;
         m_peers_downloading_from -= (state->nBlocksInFlight != 0);
         assert(m_peers_downloading_from >= 0);
         m_outbound_peers_with_protect_from_disconnect -=
             state->m_chain_sync.m_protect;
         assert(m_outbound_peers_with_protect_from_disconnect >= 0);
 
         m_node_states.erase(nodeid);
 
         if (m_node_states.empty()) {
             // Do a consistency check after the last peer is removed.
             assert(mapBlocksInFlight.empty());
             assert(m_num_preferred_download_peers == 0);
             assert(m_peers_downloading_from == 0);
             assert(m_outbound_peers_with_protect_from_disconnect == 0);
             assert(m_txrequest.Size() == 0);
             assert(m_orphanage.Size() == 0);
         }
     }
 
     if (node.fSuccessfullyConnected && misbehavior == 0 &&
         !node.IsBlockOnlyConn() && !node.IsInboundConn()) {
         // Only change visible addrman state for full outbound peers. We don't
         // call Connected() for feeler connections since they don't have
         // fSuccessfullyConnected set.
         m_addrman.Connected(node.addr);
     }
     {
         LOCK(m_headers_presync_mutex);
         m_headers_presync_stats.erase(nodeid);
     }
 
     WITH_LOCK(cs_proofrequest, m_proofrequest.DisconnectedPeer(nodeid));
 
     LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
 }
 
 PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const {
     LOCK(m_peer_mutex);
     auto it = m_peer_map.find(id);
     return it != m_peer_map.end() ? it->second : nullptr;
 }
 
 PeerRef PeerManagerImpl::RemovePeer(NodeId id) {
     PeerRef ret;
     LOCK(m_peer_mutex);
     auto it = m_peer_map.find(id);
     if (it != m_peer_map.end()) {
         ret = std::move(it->second);
         m_peer_map.erase(it);
     }
     return ret;
 }
 
 bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid,
                                         CNodeStateStats &stats) const {
     {
         LOCK(cs_main);
         const CNodeState *state = State(nodeid);
         if (state == nullptr) {
             return false;
         }
         stats.nSyncHeight = state->pindexBestKnownBlock
                                 ? state->pindexBestKnownBlock->nHeight
                                 : -1;
         stats.nCommonHeight = state->pindexLastCommonBlock
                                   ? state->pindexLastCommonBlock->nHeight
                                   : -1;
         for (const QueuedBlock &queue : state->vBlocksInFlight) {
             if (queue.pindex) {
                 stats.vHeightInFlight.push_back(queue.pindex->nHeight);
             }
         }
     }
 
     PeerRef peer = GetPeerRef(nodeid);
     if (peer == nullptr) {
         return false;
     }
     stats.their_services = peer->m_their_services;
     stats.m_starting_height = peer->m_starting_height;
     // It is common for nodes with good ping times to suddenly become lagged,
     // due to a new block arriving or other large transfer.
     // Merely reporting pingtime might fool the caller into thinking the node
     // was still responsive, since pingtime does not update until the ping is
     // complete, which might take a while. So, if a ping is taking an unusually
     // long time in flight, the caller can immediately detect that this is
     // happening.
     auto ping_wait{0us};
     if ((0 != peer->m_ping_nonce_sent) &&
         (0 != peer->m_ping_start.load().count())) {
         ping_wait =
             GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
     }
 
     if (auto tx_relay = peer->GetTxRelay()) {
         stats.m_relay_txs = WITH_LOCK(tx_relay->m_bloom_filter_mutex,
                                       return tx_relay->m_relay_txs);
         stats.m_fee_filter_received = tx_relay->m_fee_filter_received.load();
     } else {
         stats.m_relay_txs = false;
         stats.m_fee_filter_received = Amount::zero();
     }
 
     stats.m_ping_wait = ping_wait;
     stats.m_addr_processed = peer->m_addr_processed.load();
     stats.m_addr_rate_limited = peer->m_addr_rate_limited.load();
     stats.m_addr_relay_enabled = peer->m_addr_relay_enabled.load();
     {
         LOCK(peer->m_headers_sync_mutex);
         if (peer->m_headers_sync) {
             stats.presync_height = peer->m_headers_sync->GetPresyncHeight();
         }
     }
 
     return true;
 }
 
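 /**
  * Remember a transaction for later use in compact block reconstruction,
  * keeping at most -blockreconstructionextratxn entries in a circular buffer.
  */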
 void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef &tx) {
     size_t max_extra_txn = gArgs.GetIntArg(
         "-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
     if (max_extra_txn <= 0) {
         return;
     }
 
     if (!vExtraTxnForCompact.size()) {
         vExtraTxnForCompact.resize(max_extra_txn);
     }
 
     vExtraTxnForCompact[vExtraTxnForCompactIt] =
         std::make_pair(tx->GetHash(), tx);
     vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
 }
 
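 /**
  * Increment the peer's misbehavior score by howmuch; once the score crosses
  * DISCOURAGEMENT_THRESHOLD, the peer is flagged for discouragement.
  */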
 void PeerManagerImpl::Misbehaving(Peer &peer, int howmuch,
                                   const std::string &message) {
     assert(howmuch > 0);
 
     LOCK(peer.m_misbehavior_mutex);
     const int score_before{peer.m_misbehavior_score};
     peer.m_misbehavior_score += howmuch;
     const int score_now{peer.m_misbehavior_score};
 
     const std::string message_prefixed =
         message.empty() ? "" : (": " + message);
     std::string warning;
 
     if (score_now >= DISCOURAGEMENT_THRESHOLD &&
         score_before < DISCOURAGEMENT_THRESHOLD) {
         warning = " DISCOURAGE THRESHOLD EXCEEDED";
         peer.m_should_discourage = true;
     }
 
     LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n", peer.m_id,
              score_before, score_now, warning, message_prefixed);
 }
 
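 /**
  * Potentially discourage a peer based on the contents of a
  * BlockValidationState.
  *
  * @return True if the peer was punished (its misbehavior score was bumped).
  */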
 bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid,
                                               const BlockValidationState &state,
                                               bool via_compact_block,
                                               const std::string &message) {
     PeerRef peer{GetPeerRef(nodeid)};
     switch (state.GetResult()) {
         case BlockValidationResult::BLOCK_RESULT_UNSET:
             break;
         case BlockValidationResult::BLOCK_HEADER_LOW_WORK:
             // We didn't try to process the block because the header chain may
             // have too little work.
             break;
         // The node is providing invalid data:
         case BlockValidationResult::BLOCK_CONSENSUS:
         case BlockValidationResult::BLOCK_MUTATED:
             if (!via_compact_block) {
                 if (peer) {
                     Misbehaving(*peer, 100, message);
                 }
                 return true;
             }
             break;
         case BlockValidationResult::BLOCK_CACHED_INVALID: {
             LOCK(cs_main);
             CNodeState *node_state = State(nodeid);
             if (node_state == nullptr) {
                 break;
             }
 
             // Ban outbound (but not inbound) peers if on an invalid chain.
             // Exempt HB compact block peers. Manual connections are always
             // protected from discouragement.
             if (!via_compact_block && !node_state->m_is_inbound) {
                 if (peer) {
                     Misbehaving(*peer, 100, message);
                 }
                 return true;
             }
             break;
         }
         case BlockValidationResult::BLOCK_INVALID_HEADER:
         case BlockValidationResult::BLOCK_CHECKPOINT:
         case BlockValidationResult::BLOCK_INVALID_PREV:
             if (peer) {
                 Misbehaving(*peer, 100, message);
             }
             return true;
         // Conflicting (but not necessarily invalid) data or different policy:
         case BlockValidationResult::BLOCK_MISSING_PREV:
             // TODO: Handle this much more gracefully (10 DoS points is super
             // arbitrary)
             if (peer) {
                 Misbehaving(*peer, 10, message);
             }
             return true;
         case BlockValidationResult::BLOCK_TIME_FUTURE:
             break;
     }
     if (message != "") {
         LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
     }
     return false;
 }
 
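 /**
  * Potentially discourage a peer based on the contents of a
  * TxValidationState.
  *
  * @return True if the peer was punished (its misbehavior score was bumped).
  */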
 bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid,
                                            const TxValidationState &state,
                                            const std::string &message) {
     PeerRef peer{GetPeerRef(nodeid)};
     switch (state.GetResult()) {
         case TxValidationResult::TX_RESULT_UNSET:
             break;
         // The node is providing invalid data:
         case TxValidationResult::TX_CONSENSUS:
             if (peer) {
                 Misbehaving(*peer, 100, message);
             }
             return true;
         // Conflicting (but not necessarily invalid) data or different policy:
         case TxValidationResult::TX_INPUTS_NOT_STANDARD:
         case TxValidationResult::TX_NOT_STANDARD:
         case TxValidationResult::TX_MISSING_INPUTS:
         case TxValidationResult::TX_PREMATURE_SPEND:
         case TxValidationResult::TX_DUPLICATE:
         case TxValidationResult::TX_CONFLICT:
         case TxValidationResult::TX_CHILD_BEFORE_PARENT:
         case TxValidationResult::TX_MEMPOOL_POLICY:
         case TxValidationResult::TX_NO_MEMPOOL:
             break;
     }
     if (message != "") {
         LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
     }
     return false;
 }
 
 bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex *pindex) {
     AssertLockHeld(cs_main);
     if (m_chainman.ActiveChain().Contains(pindex)) {
         return true;
     }
     return pindex->IsValid(BlockValidity::SCRIPTS) &&
            (m_chainman.m_best_header != nullptr) &&
            (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() <
             STALE_RELAY_AGE_LIMIT) &&
            (GetBlockProofEquivalentTime(
                 *m_chainman.m_best_header, *pindex, *m_chainman.m_best_header,
                 m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
 }
 
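 /**
  * Request a single block directly from the given peer.
  *
  * @return std::nullopt on success, or a human-readable error string.
  */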
 std::optional<std::string>
 PeerManagerImpl::FetchBlock(const Config &config, NodeId peer_id,
                             const CBlockIndex &block_index) {
     if (m_chainman.m_blockman.LoadingBlocks()) {
         return "Loading blocks ...";
     }
 
     LOCK(cs_main);
     // Ensure this peer exists and hasn't been disconnected
     CNodeState *state = State(peer_id);
     if (state == nullptr) {
         return "Peer does not exist";
     }
     // Mark block as in-flight unless it already is (for this peer).
     // If a block was already in-flight for a different peer, its BLOCKTXN
     // response will be dropped.
     if (!BlockRequested(config, peer_id, block_index)) {
         return "Already requested from this peer";
     }
 
     // Construct message to request the block
     const BlockHash &hash{block_index.GetBlockHash()};
     const std::vector<CInv> invs{CInv(MSG_BLOCK, hash)};
 
     // Send block request message to the peer
     if (!m_connman.ForNode(peer_id, [this, &invs](CNode *node) {
             const CNetMsgMaker msgMaker(node->GetCommonVersion());
             this->m_connman.PushMessage(
                 node, msgMaker.Make(NetMsgType::GETDATA, invs));
             return true;
         })) {
         return "Node not fully connected";
     }
 
     LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", hash.ToString(),
              peer_id);
     return std::nullopt;
 }
 
-std::unique_ptr<PeerManager> PeerManager::make(CConnman &connman,
-                                               AddrMan &addrman, BanMan *banman,
-                                               ChainstateManager &chainman,
-                                               CTxMemPool &pool,
-                                               bool ignore_incoming_txs) {
+std::unique_ptr<PeerManager>
+PeerManager::make(CConnman &connman, AddrMan &addrman, BanMan *banman,
+                  ChainstateManager &chainman, CTxMemPool &pool,
+                  avalanche::Processor *const avalanche,
+                  bool ignore_incoming_txs) {
     return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman,
-                                             pool, ignore_incoming_txs);
+                                             pool, avalanche,
+                                             ignore_incoming_txs);
 }
 
 PeerManagerImpl::PeerManagerImpl(CConnman &connman, AddrMan &addrman,
                                  BanMan *banman, ChainstateManager &chainman,
-                                 CTxMemPool &pool, bool ignore_incoming_txs)
+                                 CTxMemPool &pool,
+                                 avalanche::Processor *const avalanche,
+                                 bool ignore_incoming_txs)
     : m_chainparams(chainman.GetParams()), m_connman(connman),
       m_addrman(addrman), m_banman(banman), m_chainman(chainman),
-      m_mempool(pool), m_ignore_incoming_txs(ignore_incoming_txs) {}
+      m_mempool(pool), m_avalanche(avalanche),
+      m_ignore_incoming_txs(ignore_incoming_txs) {}
 
 void PeerManagerImpl::StartScheduledTasks(CScheduler &scheduler) {
     // Stale tip checking and peer eviction are on two different timers, but we
     // don't want them to get out of sync due to drift in the scheduler, so we
     // combine them in one function and schedule at the quicker (peer-eviction)
     // timer.
     static_assert(
         EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL,
         "peer eviction timer should be less than stale tip check timer");
     scheduler.scheduleEvery(
         [this]() {
             this->CheckForStaleTipAndEvictPeers();
             return true;
         },
         std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
 
     // schedule next run for 10-15 minutes in the future
     const auto reattemptBroadcastInterval = 10min + GetRandMillis(5min);
     scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); },
                               reattemptBroadcastInterval);
 
     // Update the avalanche statistics on a schedule
     scheduler.scheduleEvery(
         [this]() {
             UpdateAvalancheStatistics();
             return true;
         },
         AVALANCHE_STATISTICS_REFRESH_PERIOD);
 
     // schedule next run for 2-5 minutes in the future
     const auto avalanchePeriodicNetworkingInterval = 2min + GetRandMillis(3min);
     scheduler.scheduleFromNow([&] { AvalanchePeriodicNetworking(scheduler); },
                               avalanchePeriodicNetworkingInterval);
 }
 
 /**
  * Evict orphan txn pool entries based on a newly connected
  * block, remember the recently confirmed transactions, and delete tracked
  * announcements for them. Also save the time of the last tip update and
  * possibly reduce dynamic block stalling timeout.
  */
 void PeerManagerImpl::BlockConnected(
     const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex) {
     m_orphanage.EraseForBlock(*pblock);
     m_last_tip_update = GetTime<std::chrono::seconds>();
 
     {
         LOCK(m_recent_confirmed_transactions_mutex);
         for (const CTransactionRef &ptx : pblock->vtx) {
             m_recent_confirmed_transactions.insert(ptx->GetId());
         }
     }
     {
         LOCK(cs_main);
         for (const auto &ptx : pblock->vtx) {
             m_txrequest.ForgetInvId(ptx->GetId());
         }
     }
 
     // In case the dynamic timeout was doubled once or more, reduce it slowly
     // back to its default value
     auto stalling_timeout = m_block_stalling_timeout.load();
     Assume(stalling_timeout >= BLOCK_STALLING_TIMEOUT_DEFAULT);
     if (stalling_timeout != BLOCK_STALLING_TIMEOUT_DEFAULT) {
         const auto new_timeout =
             std::max(std::chrono::duration_cast<std::chrono::seconds>(
                          stalling_timeout * 0.85),
                      BLOCK_STALLING_TIMEOUT_DEFAULT);
         if (m_block_stalling_timeout.compare_exchange_strong(stalling_timeout,
                                                              new_timeout)) {
             LogPrint(BCLog::NET, "Decreased stalling timeout to %d seconds\n",
                      count_seconds(new_timeout));
         }
     }
 }
 
 void PeerManagerImpl::BlockDisconnected(
     const std::shared_ptr<const CBlock> &block, const CBlockIndex *pindex) {
     // To avoid relay problems with transactions that were previously
     // confirmed, clear our filter of recently confirmed transactions whenever
     // there's a reorg.
     // This means that in a 1-block reorg (where 1 block is disconnected and
     // then another block reconnected), our filter will drop to having only one
     // block's worth of transactions in it, but that should be fine, since
     // presumably the most common case of relaying a confirmed transaction
     // should be just after a new block containing it is found.
     LOCK(m_recent_confirmed_transactions_mutex);
     m_recent_confirmed_transactions.reset();
 }
 
 /**
  * Maintain state about the best-seen block and fast-announce a compact block
  * to compatible peers.
  */
 void PeerManagerImpl::NewPoWValidBlock(
     const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &pblock) {
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock =
         std::make_shared<const CBlockHeaderAndShortTxIDs>(*pblock);
     const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
 
     LOCK(cs_main);
 
     if (pindex->nHeight <= m_highest_fast_announce) {
         return;
     }
     m_highest_fast_announce = pindex->nHeight;
 
     BlockHash hashBlock(pblock->GetHash());
     const std::shared_future<CSerializedNetMsg> lazy_ser{
         std::async(std::launch::deferred, [&] {
             return msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock);
         })};
 
     {
         LOCK(m_most_recent_block_mutex);
         m_most_recent_block_hash = hashBlock;
         m_most_recent_block = pblock;
         m_most_recent_compact_block = pcmpctblock;
     }
 
     m_connman.ForEachNode(
         [this, pindex, &lazy_ser, &hashBlock](CNode *pnode)
             EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
                 AssertLockHeld(::cs_main);
 
                 if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION ||
                     pnode->fDisconnect) {
                     return;
                 }
                 ProcessBlockAvailability(pnode->GetId());
                 CNodeState &state = *State(pnode->GetId());
                 // If the peer has, or we announced to them the previous block
                 // already, but we don't think they have this one, go ahead and
                 // announce it.
                 if (state.m_requested_hb_cmpctblocks &&
                     !PeerHasHeader(&state, pindex) &&
                     PeerHasHeader(&state, pindex->pprev)) {
                     LogPrint(BCLog::NET,
                              "%s sending header-and-ids %s to peer=%d\n",
                              "PeerManager::NewPoWValidBlock",
                              hashBlock.ToString(), pnode->GetId());
 
                     const CSerializedNetMsg &ser_cmpctblock{lazy_ser.get()};
                     m_connman.PushMessage(pnode, ser_cmpctblock.Copy());
                     state.pindexBestHeaderSent = pindex;
                 }
             });
 }
 
 /**
  * Update our best height and announce any block hashes which weren't previously
  * in m_chainman.ActiveChain() to our peers.
  */
 void PeerManagerImpl::UpdatedBlockTip(const CBlockIndex *pindexNew,
                                       const CBlockIndex *pindexFork,
                                       bool fInitialDownload) {
     SetBestHeight(pindexNew->nHeight);
     SetServiceFlagsIBDCache(!fInitialDownload);
 
     // Don't relay inventory during initial block download.
     if (fInitialDownload) {
         return;
     }
 
     // Find the hashes of all blocks that weren't previously in the best chain.
     std::vector<BlockHash> vHashes;
     const CBlockIndex *pindexToAnnounce = pindexNew;
     while (pindexToAnnounce != pindexFork) {
         vHashes.push_back(pindexToAnnounce->GetBlockHash());
         pindexToAnnounce = pindexToAnnounce->pprev;
         if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
             // Limit announcements in case of a huge reorganization. Rely on the
             // peer's synchronization mechanism in that case.
             break;
         }
     }
 
     {
         LOCK(m_peer_mutex);
         for (auto &it : m_peer_map) {
             Peer &peer = *it.second;
             LOCK(peer.m_block_inv_mutex);
             for (const BlockHash &hash : reverse_iterate(vHashes)) {
                 peer.m_blocks_for_headers_relay.push_back(hash);
             }
         }
     }
 
     m_connman.WakeMessageHandler();
 }
 
 /**
  * Handle invalid block rejection and consequent peer banning, maintain which
  * peers announce compact blocks.
  */
 void PeerManagerImpl::BlockChecked(const CBlock &block,
                                    const BlockValidationState &state) {
     LOCK(cs_main);
 
     const BlockHash hash = block.GetHash();
     std::map<BlockHash, std::pair<NodeId, bool>>::iterator it =
         mapBlockSource.find(hash);
 
     // If the block failed validation, we know where it came from and we're
     // still connected to that peer, maybe punish.
     if (state.IsInvalid() && it != mapBlockSource.end() &&
         State(it->second.first)) {
         MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state,
                                 /*via_compact_block=*/!it->second.second);
     }
     // Check that:
     // 1. The block is valid
     // 2. We're not in initial block download
     // 3. This is currently the best block we're aware of. We haven't updated
     //    the tip yet so we have no way to check this directly here. Instead we
     //    just check that there are currently no other blocks in flight.
     else if (state.IsValid() &&
              !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
              mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
         if (it != mapBlockSource.end()) {
             MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first);
         }
     }
 
     if (it != mapBlockSource.end()) {
         mapBlockSource.erase(it);
     }
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // Messages
 //
 
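 /**
  * Whether we already have this transaction: in the orphanage, recently
  * confirmed, recently rejected against the current tip, or in the mempool.
  */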
 bool PeerManagerImpl::AlreadyHaveTx(const TxId &txid) {
     if (m_chainman.ActiveChain().Tip()->GetBlockHash() !=
         hashRecentRejectsChainTip) {
         // If the chain tip has changed, previously rejected transactions
         // might now be valid, e.g. due to an nLockTime'd tx becoming
         // valid, or a double-spend. Reset the rejects filter and give
         // those txs a second chance.
         hashRecentRejectsChainTip =
             m_chainman.ActiveChain().Tip()->GetBlockHash();
         m_recent_rejects.reset();
     }
 
     if (m_orphanage.HaveTx(txid)) {
         return true;
     }
 
     {
         LOCK(m_recent_confirmed_transactions_mutex);
         if (m_recent_confirmed_transactions.contains(txid)) {
             return true;
         }
     }
 
     return m_recent_rejects.contains(txid) || m_mempool.exists(txid);
 }
 
 bool PeerManagerImpl::AlreadyHaveBlock(const BlockHash &block_hash) {
     return m_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr;
 }
 
 bool PeerManagerImpl::AlreadyHaveProof(const avalanche::ProofId &proofid) {
-    assert(g_avalanche);
+    assert(m_avalanche);
 
-    auto localProof = g_avalanche->getLocalProof();
+    auto localProof = m_avalanche->getLocalProof();
     if (localProof && localProof->getId() == proofid) {
         return true;
     }
 
-    return g_avalanche->withPeerManager([&proofid](avalanche::PeerManager &pm) {
+    return m_avalanche->withPeerManager([&proofid](avalanche::PeerManager &pm) {
         return pm.exists(proofid) || pm.isInvalid(proofid);
     });
 }
 
 void PeerManagerImpl::SendPings() {
     LOCK(m_peer_mutex);
     for (auto &it : m_peer_map) {
         it.second->m_ping_queued = true;
     }
 }
 
 void PeerManagerImpl::RelayTransaction(const TxId &txid) {
     LOCK(m_peer_mutex);
     for (auto &it : m_peer_map) {
         Peer &peer = *it.second;
         auto tx_relay = peer.GetTxRelay();
         if (!tx_relay) {
             continue;
         }
         LOCK(tx_relay->m_tx_inventory_mutex);
         if (!tx_relay->m_tx_inventory_known_filter.contains(txid)) {
             tx_relay->m_tx_inventory_to_send.insert(txid);
         }
     }
 }
 
 void PeerManagerImpl::RelayProof(const avalanche::ProofId &proofid) {
     LOCK(m_peer_mutex);
     for (auto &it : m_peer_map) {
         Peer &peer = *it.second;
 
         if (!peer.m_proof_relay) {
             continue;
         }
         LOCK(peer.m_proof_relay->m_proof_inventory_mutex);
         if (!peer.m_proof_relay->m_proof_inventory_known_filter.contains(
                 proofid)) {
             peer.m_proof_relay->m_proof_inventory_to_send.insert(proofid);
         }
     }
 }
 
 void PeerManagerImpl::RelayAddress(NodeId originator, const CAddress &addr,
                                    bool fReachable) {
     // We choose the same nodes within a given 24h window (if the list of
     // connected nodes does not change) and we don't relay to nodes that already
     // know an address. So within 24h we will likely relay a given address once.
     // This is to prevent a peer from unjustly giving their address better
     // propagation by sending it to us repeatedly.
 
     if (!fReachable && !addr.IsRelayable()) {
         return;
     }
 
     // Relay to a limited number of other nodes
     // Use deterministic randomness to send to the same nodes for 24 hours
     // at a time so the m_addr_knowns of the chosen nodes prevent repeats
     const uint64_t hash_addr{CServiceHash(0, 0)(addr)};
     const auto current_time{GetTime<std::chrono::seconds>()};
     // Adding address hash makes exact rotation time different per address,
     // while preserving periodicity.
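     // (time_addr is effectively a 24h-window counter whose phase is
     // shifted by hash_addr seconds, so each address rotates at its own
     // offset.)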
     const uint64_t time_addr{
         (static_cast<uint64_t>(count_seconds(current_time)) + hash_addr) /
         count_seconds(ROTATE_ADDR_RELAY_DEST_INTERVAL)};
 
     const CSipHasher hasher{
         m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY)
             .Write(hash_addr)
             .Write(time_addr)};
     FastRandomContext insecure_rand;
 
     // Relay reachable addresses to 2 peers. Unreachable addresses are relayed
     // randomly to 1 or 2 peers.
     unsigned int nRelayNodes = (fReachable || (hasher.Finalize() & 1)) ? 2 : 1;
     std::array<std::pair<uint64_t, Peer *>, 2> best{
         {{0, nullptr}, {0, nullptr}}};
     assert(nRelayNodes <= best.size());
 
     LOCK(m_peer_mutex);
 
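     // Keep the nRelayNodes peers with the highest hashKey in 'best',
     // maintained in descending order by a small insertion sort.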
     for (auto &[id, peer] : m_peer_map) {
         if (peer->m_addr_relay_enabled && id != originator &&
             IsAddrCompatible(*peer, addr)) {
             uint64_t hashKey = CSipHasher(hasher).Write(id).Finalize();
             for (unsigned int i = 0; i < nRelayNodes; i++) {
                 if (hashKey > best[i].first) {
                     std::copy(best.begin() + i, best.begin() + nRelayNodes - 1,
                               best.begin() + i + 1);
                     best[i] = std::make_pair(hashKey, peer.get());
                     break;
                 }
             }
         }
     }
 
     for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
         PushAddress(*best[i].second, addr, insecure_rand);
     }
 }
 
 void PeerManagerImpl::ProcessGetBlockData(const Config &config, CNode &pfrom,
                                           Peer &peer, const CInv &inv) {
     const BlockHash hash(inv.hash);
 
     std::shared_ptr<const CBlock> a_recent_block;
     std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
     {
         LOCK(m_most_recent_block_mutex);
         a_recent_block = m_most_recent_block;
         a_recent_compact_block = m_most_recent_compact_block;
     }
 
     bool need_activate_chain = false;
     {
         LOCK(cs_main);
         const CBlockIndex *pindex =
             m_chainman.m_blockman.LookupBlockIndex(hash);
         if (pindex) {
             if (pindex->HaveTxsDownloaded() &&
                 !pindex->IsValid(BlockValidity::SCRIPTS) &&
                 pindex->IsValid(BlockValidity::TREE)) {
                 // If we have the block and all of its parents, but have not yet
                 // validated it, we might be in the middle of connecting it (ie
                 // in the unlock of cs_main before ActivateBestChain but after
                 // AcceptBlock). In this case, we need to run ActivateBestChain
                 // prior to checking the relay conditions below.
                 need_activate_chain = true;
             }
         }
     } // release cs_main before calling ActivateBestChain
     if (need_activate_chain) {
         BlockValidationState state;
         if (!m_chainman.ActiveChainstate().ActivateBestChain(state,
                                                              a_recent_block)) {
             LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                      state.ToString());
         }
     }
 
     LOCK(cs_main);
     const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
     if (!pindex) {
         return;
     }
     if (!BlockRequestAllowed(pindex)) {
         LogPrint(BCLog::NET,
                  "%s: ignoring request from peer=%i for old "
                  "block that isn't in the main chain\n",
                  __func__, pfrom.GetId());
         return;
     }
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
     // Disconnect node if we have reached the outbound limit for serving
     // historical blocks.
     if (m_connman.OutboundTargetReached(true) &&
         (((m_chainman.m_best_header != nullptr) &&
           (m_chainman.m_best_header->GetBlockTime() - pindex->GetBlockTime() >
            HISTORICAL_BLOCK_AGE)) ||
          inv.IsMsgFilteredBlk()) &&
         // nodes with the download permission may exceed target
         !pfrom.HasPermission(NetPermissionFlags::Download)) {
         LogPrint(BCLog::NET,
                  "historical block serving limit reached, disconnect peer=%d\n",
                  pfrom.GetId());
         pfrom.fDisconnect = true;
         return;
     }
     // Avoid leaking prune-height by never sending blocks below the
     // NODE_NETWORK_LIMITED threshold.
     // Add a two-block buffer extension to allow for possible races.
     if (!pfrom.HasPermission(NetPermissionFlags::NoBan) &&
         ((((peer.m_our_services & NODE_NETWORK_LIMITED) ==
            NODE_NETWORK_LIMITED) &&
           ((peer.m_our_services & NODE_NETWORK) != NODE_NETWORK) &&
           (m_chainman.ActiveChain().Tip()->nHeight - pindex->nHeight >
            (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) {
         LogPrint(BCLog::NET,
                  "Ignore block request below NODE_NETWORK_LIMITED "
                  "threshold, disconnect peer=%d\n",
                  pfrom.GetId());
 
         // disconnect node and prevent it from stalling (would otherwise wait
         // for the missing block)
         pfrom.fDisconnect = true;
         return;
     }
     // Pruned nodes may have deleted the block, so check whether it's available
     // before trying to send.
     if (!pindex->nStatus.hasData()) {
         return;
     }
     std::shared_ptr<const CBlock> pblock;
     if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) {
         pblock = a_recent_block;
     } else {
         // Send block from disk
         std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
         if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, *pindex)) {
             assert(!"cannot load block from disk");
         }
         pblock = pblockRead;
     }
     if (inv.IsMsgBlk()) {
         m_connman.PushMessage(&pfrom,
                               msgMaker.Make(NetMsgType::BLOCK, *pblock));
     } else if (inv.IsMsgFilteredBlk()) {
         bool sendMerkleBlock = false;
         CMerkleBlock merkleBlock;
         if (auto tx_relay = peer.GetTxRelay()) {
             LOCK(tx_relay->m_bloom_filter_mutex);
             if (tx_relay->m_bloom_filter) {
                 sendMerkleBlock = true;
                 merkleBlock = CMerkleBlock(*pblock, *tx_relay->m_bloom_filter);
             }
         }
         if (sendMerkleBlock) {
             m_connman.PushMessage(
                 &pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
             // CMerkleBlock just contains hashes, so also push any
             // transactions in the block the client did not see. This avoids
             // hurting performance by pointlessly requiring a round-trip.
             // Note that there is currently no way for a node to request any
             // single transaction we didn't send here - they must either
             // disconnect and retry or request the full block. Thus, the
             // protocol spec allows us to provide duplicate txn here;
             // however, we MUST always provide at least what the remote peer
             // needs.
             typedef std::pair<size_t, uint256> PairType;
             for (PairType &pair : merkleBlock.vMatchedTxn) {
                 m_connman.PushMessage(
                     &pfrom,
                     msgMaker.Make(NetMsgType::TX, *pblock->vtx[pair.first]));
             }
         }
         // else: no response
     } else if (inv.IsMsgCmpctBlk()) {
         // If a peer is asking for old blocks, we're almost guaranteed they
         // won't have a useful mempool to match against a compact block, and
         // we don't feel like constructing the object for them, so instead
         // we respond with the full, non-compact block.
         int nSendFlags = 0;
         if (CanDirectFetch() &&
             pindex->nHeight >=
                 m_chainman.ActiveChain().Height() - MAX_CMPCTBLOCK_DEPTH) {
             if (a_recent_compact_block &&
                 a_recent_compact_block->header.GetHash() ==
                     pindex->GetBlockHash()) {
                 m_connman.PushMessage(&pfrom,
                                       msgMaker.Make(NetMsgType::CMPCTBLOCK,
                                                     *a_recent_compact_block));
             } else {
                 CBlockHeaderAndShortTxIDs cmpctblock(*pblock);
                 m_connman.PushMessage(
                     &pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK,
                                           cmpctblock));
             }
         } else {
             m_connman.PushMessage(
                 &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
         }
     }
 
     {
         LOCK(peer.m_block_inv_mutex);
         // Trigger the peer node to send a getblocks request for the next
         // batch of inventory.
         if (hash == peer.m_continuation_block) {
             // Send immediately. This must send even if redundant, and
             // we want it right after the last block so they don't wait for
             // other stuff first.
             std::vector<CInv> vInv;
             vInv.push_back(CInv(
                 MSG_BLOCK, m_chainman.ActiveChain().Tip()->GetBlockHash()));
             m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv));
             peer.m_continuation_block = BlockHash();
         }
     }
 }
 
 CTransactionRef
 PeerManagerImpl::FindTxForGetData(const Peer &peer, const TxId &txid,
                                   const std::chrono::seconds mempool_req,
                                   const std::chrono::seconds now) {
     auto txinfo = m_mempool.info(txid);
     if (txinfo.tx) {
         // If a TX could have been INVed in reply to a MEMPOOL request,
         // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request
         // unconditionally.
         if ((mempool_req.count() && txinfo.m_time <= mempool_req) ||
             txinfo.m_time <= now - UNCONDITIONAL_RELAY_DELAY) {
             return std::move(txinfo.tx);
         }
     }
 
     {
         LOCK(cs_main);
 
         // Otherwise, the transaction must have been announced recently.
         if (Assume(peer.GetTxRelay())
                 ->m_recently_announced_invs.contains(txid)) {
             // If it was, it can be relayed from either the mempool...
             if (txinfo.tx) {
                 return std::move(txinfo.tx);
             }
             // ... or the relay pool.
             auto mi = mapRelay.find(txid);
             if (mi != mapRelay.end()) {
                 return mi->second;
             }
         }
     }
 
     return {};
 }
 
 //! Determine whether or not a peer can request a proof, and return it (or
 //! nullptr if not found or not allowed).
 avalanche::ProofRef
 PeerManagerImpl::FindProofForGetData(const Peer &peer,
                                      const avalanche::ProofId &proofid,
                                      const std::chrono::seconds now) {
     avalanche::ProofRef proof;
 
     bool send_unconditionally =
-        g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
+        m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
             return pm.forPeer(proofid, [&](const avalanche::Peer &peer) {
                 proof = peer.proof;
 
                 // If we have known that proof for long enough, allow
                 // requesting it.
                 return peer.registration_time <=
                        now - UNCONDITIONAL_RELAY_DELAY;
             });
         });
 
     if (!proof) {
         // Always send our local proof if it gets requested, assuming it's
         // valid. This will make it easier to bind with peers upon startup,
         // when the status of our proof is still unknown pending a block.
         // Note that it still needs to have been announced first (presumably
         // via an avahello message).
-        proof = g_avalanche->getLocalProof();
+        proof = m_avalanche->getLocalProof();
     }
 
     // We don't have this proof
     if (!proof) {
         return avalanche::ProofRef();
     }
 
     if (send_unconditionally) {
         return proof;
     }
 
     // Otherwise, the proof must have been announced recently.
     if (peer.m_proof_relay->m_recently_announced_proofs.contains(proofid)) {
         return proof;
     }
 
     return avalanche::ProofRef();
 }
 
 void PeerManagerImpl::ProcessGetData(
     const Config &config, CNode &pfrom, Peer &peer,
     const std::atomic<bool> &interruptMsgProc) {
     AssertLockNotHeld(cs_main);
 
     auto tx_relay = peer.GetTxRelay();
 
     std::deque<CInv>::iterator it = peer.m_getdata_requests.begin();
     std::vector<CInv> vNotFound;
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
 
     const auto now{GetTime<std::chrono::seconds>()};
     // Get last mempool request time
     const auto mempool_req = tx_relay != nullptr
                                  ? tx_relay->m_last_mempool_req.load()
                                  : std::chrono::seconds::min();
 
     // Process as many TX or AVA_PROOF items from the front of the getdata
     // queue as possible, since they're common and it's efficient to batch
     // process them.
     while (it != peer.m_getdata_requests.end()) {
         if (interruptMsgProc) {
             return;
         }
         // The send buffer provides backpressure. If there's no space in
         // the buffer, pause processing until the next call.
         if (pfrom.fPauseSend) {
             break;
         }
 
         const CInv &inv = *it;
 
         if (it->IsMsgProof()) {
-            if (!g_avalanche) {
+            if (!m_avalanche) {
                 vNotFound.push_back(inv);
                 ++it;
                 continue;
             }
             const avalanche::ProofId proofid(inv.hash);
             auto proof = FindProofForGetData(peer, proofid, now);
             if (proof) {
                 m_connman.PushMessage(
                     &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
-                g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
+                m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
                     pm.removeUnbroadcastProof(proofid);
                 });
             } else {
                 vNotFound.push_back(inv);
             }
 
             ++it;
             continue;
         }
 
         if (it->IsMsgTx()) {
             if (tx_relay == nullptr) {
                 // Ignore GETDATA requests for transactions from
                 // block-relay-only peers and peers that asked us not to
                 // announce transactions. Advance past this item so it isn't
                 // reprocessed forever.
                 ++it;
                 continue;
             }
 
             const TxId txid(inv.hash);
             CTransactionRef tx = FindTxForGetData(peer, txid, mempool_req, now);
             if (tx) {
                 int nSendFlags = 0;
                 m_connman.PushMessage(
                     &pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
                 m_mempool.RemoveUnbroadcastTx(txid);
                 // As we're going to send tx, make sure its unconfirmed parents
                 // are made requestable.
                 std::vector<TxId> parent_ids_to_add;
                 {
                     LOCK(m_mempool.cs);
                     auto txiter = m_mempool.GetIter(tx->GetId());
                     if (txiter) {
                         auto &pentry = *txiter;
                         const CTxMemPoolEntry::Parents &parents =
                             (*pentry)->GetMemPoolParentsConst();
                         parent_ids_to_add.reserve(parents.size());
                         for (const auto &parent : parents) {
                             if (parent.get()->GetTime() >
                                 now - UNCONDITIONAL_RELAY_DELAY) {
                                 parent_ids_to_add.push_back(
                                     parent.get()->GetTx().GetId());
                             }
                         }
                     }
                 }
                 for (const TxId &parent_txid : parent_ids_to_add) {
                     // Relaying a transaction with a recent but unconfirmed
                     // parent.
                     if (WITH_LOCK(tx_relay->m_tx_inventory_mutex,
                                   return !tx_relay->m_tx_inventory_known_filter
                                               .contains(parent_txid))) {
                         tx_relay->m_recently_announced_invs.insert(parent_txid);
                     }
                 }
             } else {
                 vNotFound.push_back(inv);
             }
 
             ++it;
             continue;
         }
 
         // It's neither a proof nor a transaction
         break;
     }
 
     // Only process one BLOCK item per call, since they're uncommon and can be
     // expensive to process.
     if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) {
         const CInv &inv = *it++;
         if (inv.IsGenBlkMsg()) {
             ProcessGetBlockData(config, pfrom, peer, inv);
         }
         // else: If the first item on the queue is an unknown type, we erase it
         // and continue processing the queue on the next call.
     }
 
     peer.m_getdata_requests.erase(peer.m_getdata_requests.begin(), it);
 
     if (!vNotFound.empty()) {
         // Let the peer know that we didn't find what it asked for, so it
         // doesn't have to wait around forever. SPV clients care about this
         // message: it's needed when they are recursively walking the
         // dependencies of relevant unconfirmed transactions. SPV clients want
         // to do that because they want to know about (and store and rebroadcast
         // and risk analyze) the dependencies of transactions relevant to them,
         // without having to download the entire memory pool. Also, other nodes
         // can use these messages to automatically request a transaction from
         // some other peer that announced it, and stop waiting for us to
         // respond. In normal operation, we often send NOTFOUND messages for
         // parents of transactions that we relay; if a peer is missing a parent,
         // they may assume we have them and request the parents from us.
         m_connman.PushMessage(&pfrom,
                               msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
     }
 }
 
 void PeerManagerImpl::SendBlockTransactions(
     CNode &pfrom, Peer &peer, const CBlock &block,
     const BlockTransactionsRequest &req) {
     BlockTransactions resp(req);
     for (size_t i = 0; i < req.indices.size(); i++) {
         if (req.indices[i] >= block.vtx.size()) {
             Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");
             return;
         }
         resp.txn[i] = block.vtx[req.indices[i]];
     }
     LOCK(cs_main);
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
     int nSendFlags = 0;
     m_connman.PushMessage(
         &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
 }
 
 bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader> &headers,
                                       const Consensus::Params &consensusParams,
                                       Peer &peer) {
     // Do these headers have proof-of-work matching what's claimed?
     if (!HasValidProofOfWork(headers, consensusParams)) {
         Misbehaving(peer, 100, "header with invalid proof of work");
         return false;
     }
 
     // Are these headers connected to each other?
     if (!CheckHeadersAreContinuous(headers)) {
         Misbehaving(peer, 20, "non-continuous headers sequence");
         return false;
     }
     return true;
 }
 
 arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold() {
     arith_uint256 near_chaintip_work = 0;
     LOCK(cs_main);
     if (m_chainman.ActiveChain().Tip() != nullptr) {
         const CBlockIndex *tip = m_chainman.ActiveChain().Tip();
         // Use a 144 block buffer, so that we'll accept headers that fork from
         // near our tip.
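         // (The std::min clamp keeps the subtraction below from underflowing
         // when the chain has fewer than 144 blocks' worth of work.)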
         near_chaintip_work =
             tip->nChainWork -
             std::min<arith_uint256>(144 * GetBlockProof(*tip), tip->nChainWork);
     }
     return std::max(near_chaintip_work, m_chainman.MinimumChainWork());
 }
 
 /**
  * Special handling for unconnecting headers that might be part of a block
  * announcement.
  *
  * We'll send a getheaders message in response to try to connect the chain.
  *
  * The peer can send up to MAX_NUM_UNCONNECTING_HEADERS_MSGS headers messages
  * in a row that don't connect before being given DoS points.
  *
  * Once a headers message is received that is valid and does connect,
  * m_num_unconnecting_headers_msgs gets reset back to 0.
  */
 void PeerManagerImpl::HandleFewUnconnectingHeaders(
     CNode &pfrom, Peer &peer, const std::vector<CBlockHeader> &headers) {
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
 
     peer.m_num_unconnecting_headers_msgs++;
     // Try to fill in the missing headers.
     const CBlockIndex *best_header{
         WITH_LOCK(cs_main, return m_chainman.m_best_header)};
     if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
         LogPrint(
             BCLog::NET,
             "received header %s: missing prev block %s, sending getheaders "
             "(%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
             headers[0].GetHash().ToString(),
             headers[0].hashPrevBlock.ToString(), best_header->nHeight,
             pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
     }
 
     // Set hashLastUnknownBlock for this peer, so that if we
     // eventually get the headers - even from a different peer -
     // we can use this peer to download.
     WITH_LOCK(cs_main,
               UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
 
     // The peer may just be broken, so periodically assign DoS points if this
     // condition persists.
     if (peer.m_num_unconnecting_headers_msgs %
             MAX_NUM_UNCONNECTING_HEADERS_MSGS ==
         0) {
         Misbehaving(peer, 20,
                     strprintf("%d non-connecting headers",
                               peer.m_num_unconnecting_headers_msgs));
     }
 }
 
 bool PeerManagerImpl::CheckHeadersAreContinuous(
     const std::vector<CBlockHeader> &headers) const {
     BlockHash hashLastBlock;
     for (const CBlockHeader &header : headers) {
         if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
             return false;
         }
         hashLastBlock = header.GetHash();
     }
     return true;
 }
 
 bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(
     Peer &peer, CNode &pfrom, std::vector<CBlockHeader> &headers) {
     if (peer.m_headers_sync) {
         auto result = peer.m_headers_sync->ProcessNextHeaders(
             headers, headers.size() == MAX_HEADERS_RESULTS);
         if (result.request_more) {
             auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
             // If we were instructed to ask for a locator, it should not be
             // empty.
             Assume(!locator.vHave.empty());
             if (!locator.vHave.empty()) {
                 // It should be impossible for the getheaders request to fail,
                 // because we should have cleared the last getheaders timestamp
                 // when processing the headers that triggered this call. But
                 // it may be possible to bypass this via compactblock
                 // processing, so check the result before logging just to be
                 // safe.
                 bool sent_getheaders =
                     MaybeSendGetHeaders(pfrom, locator, peer);
                 if (sent_getheaders) {
                     LogPrint(BCLog::NET,
                              "more getheaders (from %s) to peer=%d\n",
                              locator.vHave.front().ToString(), pfrom.GetId());
                 } else {
                     LogPrint(BCLog::NET,
                              "error sending next getheaders (from %s) to "
                              "continue sync with peer=%d\n",
                              locator.vHave.front().ToString(), pfrom.GetId());
                 }
             }
         }
 
         if (peer.m_headers_sync->GetState() == HeadersSyncState::State::FINAL) {
             peer.m_headers_sync.reset(nullptr);
 
             // Delete this peer's entry in m_headers_presync_stats.
             // If this is m_headers_presync_bestpeer, it will be replaced later
             // by the next peer that triggers the else{} branch below.
             LOCK(m_headers_presync_mutex);
             m_headers_presync_stats.erase(pfrom.GetId());
         } else {
             // Build statistics for this peer's sync.
             HeadersPresyncStats stats;
             stats.first = peer.m_headers_sync->GetPresyncWork();
             if (peer.m_headers_sync->GetState() ==
                 HeadersSyncState::State::PRESYNC) {
                 stats.second = {peer.m_headers_sync->GetPresyncHeight(),
                                 peer.m_headers_sync->GetPresyncTime()};
             }
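             // (HeadersPresyncStats compare lexicographically: presync work
             // first, then the optional (height, time) pair, which is only
             // set while the peer is in the PRESYNC phase.)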
 
             // Record this peer's updated stats.
             LOCK(m_headers_presync_mutex);
             m_headers_presync_stats[pfrom.GetId()] = stats;
             auto best_it =
                 m_headers_presync_stats.find(m_headers_presync_bestpeer);
             bool best_updated = false;
             if (best_it == m_headers_presync_stats.end()) {
                 // If the cached best peer is outdated, iterate over all
                 // remaining ones (including the newly updated one) to find
                 // the best one.
                 NodeId peer_best{-1};
                 const HeadersPresyncStats *stat_best{nullptr};
                 for (const auto &[_peer, _stat] : m_headers_presync_stats) {
                     if (!stat_best || _stat > *stat_best) {
                         peer_best = _peer;
                         stat_best = &_stat;
                     }
                 }
                 m_headers_presync_bestpeer = peer_best;
                 best_updated = (peer_best == pfrom.GetId());
             } else if (best_it->first == pfrom.GetId() ||
                        stats > best_it->second) {
                 // pfrom was and remains the best peer, or pfrom just became
                 // best.
                 m_headers_presync_bestpeer = pfrom.GetId();
                 best_updated = true;
             }
             if (best_updated && stats.second.has_value()) {
                 // If the best peer was updated and is still in its first
                 // (PRESYNC) phase, signal.
                 m_headers_presync_should_signal = true;
             }
         }
 
         if (result.success) {
             // We only overwrite the headers passed in if processing was
             // successful.
             headers.swap(result.pow_validated_headers);
         }
 
         return result.success;
     }
     // Either we didn't have a sync in progress, or something went wrong
     // processing these headers, or we are returning headers to the caller to
     // process.
     return false;
 }
 
 bool PeerManagerImpl::TryLowWorkHeadersSync(
     Peer &peer, CNode &pfrom, const CBlockIndex *chain_start_header,
     std::vector<CBlockHeader> &headers) {
     // Calculate the total work on this chain.
     arith_uint256 total_work =
         chain_start_header->nChainWork + CalculateHeadersWork(headers);
 
     // Our dynamic anti-DoS threshold (minimum work required on a headers chain
     // before we'll store it)
     arith_uint256 minimum_chain_work = GetAntiDoSWorkThreshold();
 
     // Avoid DoS via low-difficulty-headers by only processing if the headers
     // are part of a chain with sufficient work.
     if (total_work < minimum_chain_work) {
         // Only try to sync with this peer if their headers message was full;
         // otherwise they don't have more headers after this, so there is no
         // point in trying to sync their too-little-work chain.
         if (headers.size() == MAX_HEADERS_RESULTS) {
             // Note: we could advance to the last header in this set that is
             // known to us, rather than starting at the first header (which we
             // may already have); however this is unlikely to matter much since
             // ProcessHeadersMessage() already handles the case where all
             // headers in a received message are already known and are
             // ancestors of m_best_header or the active chain tip, by skipping
             // this logic in that case. So even if the first header in this
             // set of headers is known, some header in this set must be new,
             // so advancing to the first unknown header would have only a
             // small effect.
             LOCK(peer.m_headers_sync_mutex);
             peer.m_headers_sync.reset(
                 new HeadersSyncState(peer.m_id, m_chainparams.GetConsensus(),
                                      chain_start_header, minimum_chain_work));
 
             // Now that a HeadersSyncState object for tracking this
             // synchronization has been created, process the headers using it
             // as normal.
             return IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
         }
 
         LogPrint(BCLog::NET,
                  "Ignoring low-work chain (height=%u) from peer=%d\n",
                  chain_start_header->nHeight + headers.size(), pfrom.GetId());
         // Since this is a low-work headers chain, no further processing is
         // required.
         headers = {};
         return true;
     }
     return false;
 }
 
 bool PeerManagerImpl::IsAncestorOfBestHeaderOrTip(const CBlockIndex *header) {
     return header != nullptr &&
            ((m_chainman.m_best_header != nullptr &&
              header ==
                  m_chainman.m_best_header->GetAncestor(header->nHeight)) ||
             m_chainman.ActiveChain().Contains(header));
 }
 
 bool PeerManagerImpl::MaybeSendGetHeaders(CNode &pfrom,
                                           const CBlockLocator &locator,
                                           Peer &peer) {
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
 
     const auto current_time = NodeClock::now();
 
     // Only allow a new getheaders message to go out if we don't have a recent
     // one already in-flight
     if (current_time - peer.m_last_getheaders_timestamp >
         HEADERS_RESPONSE_TIME) {
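         // A null stop hash asks the peer for as many headers as it will
         // send (up to MAX_HEADERS_RESULTS).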
         m_connman.PushMessage(
             &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, locator, uint256()));
         peer.m_last_getheaders_timestamp = current_time;
         return true;
     }
     return false;
 }
 
 /**
  * Given a new headers tip ending in pindexLast, potentially request blocks
  * towards that tip. We require that the given tip have at least as much work as
  * our tip, and for our current tip to be "close to synced" (see
  * CanDirectFetch()).
  */
 void PeerManagerImpl::HeadersDirectFetchBlocks(const Config &config,
                                                CNode &pfrom,
                                                const CBlockIndex *pindexLast) {
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
 
     LOCK(cs_main);
     CNodeState *nodestate = State(pfrom.GetId());
 
     if (CanDirectFetch() && pindexLast->IsValid(BlockValidity::TREE) &&
         m_chainman.ActiveChain().Tip()->nChainWork <= pindexLast->nChainWork) {
         std::vector<const CBlockIndex *> vToFetch;
         const CBlockIndex *pindexWalk = pindexLast;
         // Calculate all the blocks we'd need to switch to pindexLast, up to
         // a limit.
         while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) &&
                vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             if (!pindexWalk->nStatus.hasData() &&
                 !IsBlockRequested(pindexWalk->GetBlockHash())) {
                 // We don't have this block, and it's not yet in flight.
                 vToFetch.push_back(pindexWalk);
             }
             pindexWalk = pindexWalk->pprev;
         }
         // If pindexWalk still isn't on our main chain, we're looking at a
         // very large reorg at a time we think we're close to caught up to
         // the main chain -- this shouldn't really happen. Bail out on the
         // direct fetch and rely on parallel download instead.
         if (!m_chainman.ActiveChain().Contains(pindexWalk)) {
             LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
                      pindexLast->GetBlockHash().ToString(),
                      pindexLast->nHeight);
         } else {
             std::vector<CInv> vGetData;
             // Download as much as possible, from earliest to latest.
             for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
                 if (nodestate->nBlocksInFlight >=
                     MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
                     // Can't download any more from this peer
                     break;
                 }
                 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                 BlockRequested(config, pfrom.GetId(), *pindex);
                 LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
                          pindex->GetBlockHash().ToString(), pfrom.GetId());
             }
             if (vGetData.size() > 1) {
                 LogPrint(BCLog::NET,
                          "Downloading blocks toward %s (%d) via headers "
                          "direct fetch\n",
                          pindexLast->GetBlockHash().ToString(),
                          pindexLast->nHeight);
             }
             if (vGetData.size() > 0) {
                 if (!m_ignore_incoming_txs &&
                     nodestate->m_provides_cmpctblocks && vGetData.size() == 1 &&
                     mapBlocksInFlight.size() == 1 &&
                     pindexLast->pprev->IsValid(BlockValidity::CHAIN)) {
                     // In any case, we want to download using a compact
                     // block, not a regular one.
                     vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
                 }
                 m_connman.PushMessage(
                     &pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
             }
         }
     }
 }
 
 /**
  * Given receipt of headers from a peer ending in pindexLast, along with
  * whether that header was new and whether the headers message was full,
  * update the state we keep for the peer.
  */
 void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(
     CNode &pfrom, Peer &peer, const CBlockIndex *pindexLast,
     bool received_new_header, bool may_have_more_headers) {
     if (peer.m_num_unconnecting_headers_msgs > 0) {
         LogPrint(
             BCLog::NET,
             "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n",
             pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
     }
     peer.m_num_unconnecting_headers_msgs = 0;
 
     LOCK(cs_main);
 
     CNodeState *nodestate = State(pfrom.GetId());
 
     assert(pindexLast);
     UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash());
 
     // From here, pindexBestKnownBlock should be guaranteed to be non-null,
     // because it is set in UpdateBlockAvailability. Some nullptr checks are
     // still present, however, as belt-and-suspenders.
 
     if (received_new_header &&
         pindexLast->nChainWork > m_chainman.ActiveChain().Tip()->nChainWork) {
         nodestate->m_last_block_announcement = GetTime();
     }
 
     // If we're in IBD, we want outbound peers that will serve us a useful
     // chain. Disconnect peers that are on chains with insufficient work.
     if (m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
         !may_have_more_headers) {
         // When nCount < MAX_HEADERS_RESULTS, we know we have no more
         // headers to fetch from this peer.
         if (nodestate->pindexBestKnownBlock &&
             nodestate->pindexBestKnownBlock->nChainWork <
                 m_chainman.MinimumChainWork()) {
             // This peer has too little work on their headers chain to help
             // us sync -- disconnect if it is an outbound disconnection
             // candidate.
             // Note: We compare their tip to the minimum chain work (rather than
             // m_chainman.ActiveChain().Tip()) because we won't start block
             // download until we have a headers chain that has at least
             // the minimum chain work, even if a peer has a chain past our tip,
             // as an anti-DoS measure.
             if (pfrom.IsOutboundOrBlockRelayConn()) {
                 LogPrintf("Disconnecting outbound peer %d -- headers "
                           "chain has insufficient work\n",
                           pfrom.GetId());
                 pfrom.fDisconnect = true;
             }
         }
     }
 
     // If this is an outbound full-relay peer, check to see if we should
     // protect it from the bad/lagging chain logic.
     // Note that outbound block-relay peers are excluded from this
     // protection, and thus always subject to eviction under the bad/lagging
     // chain logic.
     // See ChainSyncTimeoutState.
     if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() &&
         nodestate->pindexBestKnownBlock != nullptr) {
         if (m_outbound_peers_with_protect_from_disconnect <
                 MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT &&
             nodestate->pindexBestKnownBlock->nChainWork >=
                 m_chainman.ActiveChain().Tip()->nChainWork &&
             !nodestate->m_chain_sync.m_protect) {
             LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n",
                      pfrom.GetId());
             nodestate->m_chain_sync.m_protect = true;
             ++m_outbound_peers_with_protect_from_disconnect;
         }
     }
 }
 
 void PeerManagerImpl::ProcessHeadersMessage(const Config &config, CNode &pfrom,
                                             Peer &peer,
                                             std::vector<CBlockHeader> &&headers,
                                             bool via_compact_block) {
     size_t nCount = headers.size();
 
     if (nCount == 0) {
         // Nothing interesting. Stop asking this peer for more headers.
         // If we were in the middle of headers sync, receiving an empty headers
         // message suggests that the peer suddenly has nothing to give us
         // (perhaps it reorged to our chain). Clear download state for this
         // peer.
         LOCK(peer.m_headers_sync_mutex);
         if (peer.m_headers_sync) {
             peer.m_headers_sync.reset(nullptr);
             LOCK(m_headers_presync_mutex);
             m_headers_presync_stats.erase(pfrom.GetId());
         }
         return;
     }
 
     // Before we do any processing, make sure these pass basic sanity checks.
     // We'll rely on headers having valid proof-of-work further down, as an
     // anti-DoS criterion (note: this check is required before passing any
     // headers into HeadersSyncState).
     if (!CheckHeadersPoW(headers, m_chainparams.GetConsensus(), peer)) {
         // Misbehaving() calls are handled within CheckHeadersPoW(), so we can
         // just return. (Note that even if a header is announced via compact
         // block, the header itself should be valid, so this type of error can
         // always be punished.)
         return;
     }
 
     const CBlockIndex *pindexLast = nullptr;
 
     // We'll set already_validated_work to true if these headers are
     // successfully processed as part of a low-work headers sync in progress
     // (either in PRESYNC or REDOWNLOAD phase).
     // If true, this will mean that any headers returned to us (ie during
     // REDOWNLOAD) can be validated without further anti-DoS checks.
     bool already_validated_work = false;
 
     // If we're in the middle of headers sync, let it do its magic.
     bool have_headers_sync = false;
     {
         LOCK(peer.m_headers_sync_mutex);
 
         already_validated_work =
             IsContinuationOfLowWorkHeadersSync(peer, pfrom, headers);
 
         // The headers we passed in may have been:
         // - untouched, perhaps if no headers-sync was in progress, or some
         //   failure occurred
         // - erased, such as if the headers were successfully processed and no
         //   additional headers processing needs to take place (such as if we
         //   are still in PRESYNC)
         // - replaced with headers that are now ready for validation, such as
         //   during the REDOWNLOAD phase of a low-work headers sync.
         // So just check whether we still have headers that we need to process,
         // or not.
         if (headers.empty()) {
             return;
         }
 
         have_headers_sync = !!peer.m_headers_sync;
     }
 
     // Do these headers connect to something in our block index?
     const CBlockIndex *chain_start_header{
         WITH_LOCK(::cs_main, return m_chainman.m_blockman.LookupBlockIndex(
                                  headers[0].hashPrevBlock))};
     bool headers_connect_blockindex{chain_start_header != nullptr};
 
     if (!headers_connect_blockindex) {
         if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
             // If this looks like it could be a BIP 130 block announcement, use
             // special logic for handling headers that don't connect, as this
             // could be benign.
             HandleFewUnconnectingHeaders(pfrom, peer, headers);
         } else {
             Misbehaving(peer, 10, "invalid header received");
         }
         return;
     }
 
     // If the headers we received are already in memory and an ancestor of
     // m_best_header or our tip, skip anti-DoS checks. These headers will not
     // use any more memory (and we are not leaking information that could be
     // used to fingerprint us).
     const CBlockIndex *last_received_header{nullptr};
     {
         LOCK(cs_main);
         last_received_header =
             m_chainman.m_blockman.LookupBlockIndex(headers.back().GetHash());
         if (IsAncestorOfBestHeaderOrTip(last_received_header)) {
             already_validated_work = true;
         }
     }
 
     // If our peer has NetPermissionFlags::NoBan privileges, then bypass our
     // anti-DoS logic (this saves bandwidth when we connect to a trusted peer
     // on startup).
     if (pfrom.HasPermission(NetPermissionFlags::NoBan)) {
         already_validated_work = true;
     }
 
     // At this point, the headers connect to something in our block index.
     // Do anti-DoS checks to determine if we should process or store for later
     // processing.
     if (!already_validated_work &&
         TryLowWorkHeadersSync(peer, pfrom, chain_start_header, headers)) {
         // If we successfully started a low-work headers sync, then there
         // should be no headers to process any further.
         Assume(headers.empty());
         return;
     }
 
     // At this point, we have a set of headers with sufficient work on them
     // which can be processed.
 
     // If we don't have the last header, then this peer will have given us
     // something new (if these headers are valid).
     bool received_new_header{last_received_header == nullptr};
 
     // Now process all the headers.
     BlockValidationState state;
     if (!m_chainman.ProcessNewBlockHeaders(headers, /*min_pow_checked=*/true,
                                            state, &pindexLast)) {
         if (state.IsInvalid()) {
             MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block,
                                     "invalid header received");
             return;
         }
     }
     Assume(pindexLast);
 
     // Consider fetching more headers if we are not using our headers-sync
     // mechanism.
     if (nCount == MAX_HEADERS_RESULTS && !have_headers_sync) {
         // Headers message had its maximum size; the peer may have more headers.
         if (MaybeSendGetHeaders(pfrom, GetLocator(pindexLast), peer)) {
             LogPrint(
                 BCLog::NET,
                 "more getheaders (%d) to end to peer=%d (startheight:%d)\n",
                 pindexLast->nHeight, pfrom.GetId(), peer.m_starting_height);
         }
     }
 
     UpdatePeerStateForReceivedHeaders(pfrom, peer, pindexLast,
                                       received_new_header,
                                       nCount == MAX_HEADERS_RESULTS);
 
     // Consider immediately downloading blocks.
     HeadersDirectFetchBlocks(config, pfrom, pindexLast);
 }
 
 bool PeerManagerImpl::ProcessOrphanTx(const Config &config, Peer &peer) {
     AssertLockHeld(g_msgproc_mutex);
     LOCK(cs_main);
 
     while (CTransactionRef porphanTx =
                m_orphanage.GetTxToReconsider(peer.m_id)) {
         const MempoolAcceptResult result =
             m_chainman.ProcessTransaction(porphanTx);
         const TxValidationState &state = result.m_state;
         const TxId &orphanTxId = porphanTx->GetId();
 
         if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
             LogPrint(BCLog::MEMPOOL, "   accepted orphan tx %s\n",
                      orphanTxId.ToString());
             RelayTransaction(orphanTxId);
             m_orphanage.AddChildrenToWorkSet(*porphanTx);
             m_orphanage.EraseTx(orphanTxId);
             return true;
         } else if (state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) {
             if (state.IsInvalid()) {
                 LogPrint(BCLog::MEMPOOL,
                          "   invalid orphan tx %s from peer=%d. %s\n",
                          orphanTxId.ToString(), peer.m_id, state.ToString());
                 // Punish peer that gave us an invalid orphan tx
                 MaybePunishNodeForTx(peer.m_id, state);
             }
             // Has inputs but not accepted to mempool
             // Probably non-standard or insufficient fee
             LogPrint(BCLog::MEMPOOL, "   removed orphan tx %s\n",
                      orphanTxId.ToString());
 
             m_recent_rejects.insert(orphanTxId);
 
             m_orphanage.EraseTx(orphanTxId);
             return true;
         }
     }
 
     return false;
 }
 
 bool PeerManagerImpl::PrepareBlockFilterRequest(
     CNode &node, Peer &peer, BlockFilterType filter_type, uint32_t start_height,
     const BlockHash &stop_hash, uint32_t max_height_diff,
     const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) {
     const bool supported_filter_type =
         (filter_type == BlockFilterType::BASIC &&
          (peer.m_our_services & NODE_COMPACT_FILTERS));
     if (!supported_filter_type) {
         LogPrint(BCLog::NET,
                  "peer %d requested unsupported block filter type: %d\n",
                  node.GetId(), static_cast<uint8_t>(filter_type));
         node.fDisconnect = true;
         return false;
     }
 
     {
         LOCK(cs_main);
         stop_index = m_chainman.m_blockman.LookupBlockIndex(stop_hash);
 
         // Check that the stop block exists and the peer would be allowed to
         // fetch it.
         if (!stop_index || !BlockRequestAllowed(stop_index)) {
             LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n",
                      node.GetId(), stop_hash.ToString());
             node.fDisconnect = true;
             return false;
         }
     }
 
     uint32_t stop_height = stop_index->nHeight;
     if (start_height > stop_height) {
         LogPrint(
             BCLog::NET,
             "peer %d sent invalid getcfilters/getcfheaders with " /* Continued
                                                                    */
             "start height %d and stop height %d\n",
             node.GetId(), start_height, stop_height);
         node.fDisconnect = true;
         return false;
     }
     if (stop_height - start_height >= max_height_diff) {
         LogPrint(BCLog::NET,
                  "peer %d requested too many cfilters/cfheaders: %d / %d\n",
                  node.GetId(), stop_height - start_height + 1, max_height_diff);
         node.fDisconnect = true;
         return false;
     }
 
     filter_index = GetBlockFilterIndex(filter_type);
     if (!filter_index) {
         LogPrint(BCLog::NET, "Filter index for supported type %s not found\n",
                  BlockFilterTypeName(filter_type));
         return false;
     }
 
     return true;
 }
 
 void PeerManagerImpl::ProcessGetCFilters(CNode &node, Peer &peer,
                                          CDataStream &vRecv) {
     uint8_t filter_type_ser;
     uint32_t start_height;
     BlockHash stop_hash;
 
     vRecv >> filter_type_ser >> start_height >> stop_hash;
 
     const BlockFilterType filter_type =
         static_cast<BlockFilterType>(filter_type_ser);
 
     const CBlockIndex *stop_index;
     BlockFilterIndex *filter_index;
     if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
                                    stop_hash, MAX_GETCFILTERS_SIZE, stop_index,
                                    filter_index)) {
         return;
     }
 
     std::vector<BlockFilter> filters;
     if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) {
         LogPrint(BCLog::NET,
                  "Failed to find block filter in index: filter_type=%s, "
                  "start_height=%d, stop_hash=%s\n",
                  BlockFilterTypeName(filter_type), start_height,
                  stop_hash.ToString());
         return;
     }
 
     for (const auto &filter : filters) {
         CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
                                     .Make(NetMsgType::CFILTER, filter);
         m_connman.PushMessage(&node, std::move(msg));
     }
 }
 
 void PeerManagerImpl::ProcessGetCFHeaders(CNode &node, Peer &peer,
                                           CDataStream &vRecv) {
     uint8_t filter_type_ser;
     uint32_t start_height;
     BlockHash stop_hash;
 
     vRecv >> filter_type_ser >> start_height >> stop_hash;
 
     const BlockFilterType filter_type =
         static_cast<BlockFilterType>(filter_type_ser);
 
     const CBlockIndex *stop_index;
     BlockFilterIndex *filter_index;
     if (!PrepareBlockFilterRequest(node, peer, filter_type, start_height,
                                    stop_hash, MAX_GETCFHEADERS_SIZE, stop_index,
                                    filter_index)) {
         return;
     }
 
     uint256 prev_header;
     if (start_height > 0) {
         const CBlockIndex *const prev_block =
             stop_index->GetAncestor(static_cast<int>(start_height - 1));
         if (!filter_index->LookupFilterHeader(prev_block, prev_header)) {
             LogPrint(BCLog::NET,
                      "Failed to find block filter header in index: "
                      "filter_type=%s, block_hash=%s\n",
                      BlockFilterTypeName(filter_type),
                      prev_block->GetBlockHash().ToString());
             return;
         }
     }
 
     std::vector<uint256> filter_hashes;
     if (!filter_index->LookupFilterHashRange(start_height, stop_index,
                                              filter_hashes)) {
         LogPrint(BCLog::NET,
                  "Failed to find block filter hashes in index: filter_type=%s, "
                  "start_height=%d, stop_hash=%s\n",
                  BlockFilterTypeName(filter_type), start_height,
                  stop_hash.ToString());
         return;
     }
 
     CSerializedNetMsg msg =
         CNetMsgMaker(node.GetCommonVersion())
             .Make(NetMsgType::CFHEADERS, filter_type_ser,
                   stop_index->GetBlockHash(), prev_header, filter_hashes);
     m_connman.PushMessage(&node, std::move(msg));
 }
 
 void PeerManagerImpl::ProcessGetCFCheckPt(CNode &node, Peer &peer,
                                           CDataStream &vRecv) {
     uint8_t filter_type_ser;
     BlockHash stop_hash;
 
     vRecv >> filter_type_ser >> stop_hash;
 
     const BlockFilterType filter_type =
         static_cast<BlockFilterType>(filter_type_ser);
 
     const CBlockIndex *stop_index;
     BlockFilterIndex *filter_index;
     if (!PrepareBlockFilterRequest(
             node, peer, filter_type, /*start_height=*/0, stop_hash,
             /*max_height_diff=*/std::numeric_limits<uint32_t>::max(),
             stop_index, filter_index)) {
         return;
     }
 
     std::vector<uint256> headers(stop_index->nHeight / CFCHECKPT_INTERVAL);
 
     // Populate headers.
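     // Checkpoints are the filter headers at every CFCHECKPT_INTERVAL-th
     // block. Walking backwards lets each GetAncestor() call start from the
     // previously found (higher) index.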
     const CBlockIndex *block_index = stop_index;
     for (int i = headers.size() - 1; i >= 0; i--) {
         int height = (i + 1) * CFCHECKPT_INTERVAL;
         block_index = block_index->GetAncestor(height);
 
         if (!filter_index->LookupFilterHeader(block_index, headers[i])) {
             LogPrint(BCLog::NET,
                      "Failed to find block filter header in index: "
                      "filter_type=%s, block_hash=%s\n",
                      BlockFilterTypeName(filter_type),
                      block_index->GetBlockHash().ToString());
             return;
         }
     }
 
     CSerializedNetMsg msg = CNetMsgMaker(node.GetCommonVersion())
                                 .Make(NetMsgType::CFCHECKPT, filter_type_ser,
                                       stop_index->GetBlockHash(), headers);
     m_connman.PushMessage(&node, std::move(msg));
 }
 
 bool IsAvalancheMessageType(const std::string &msg_type) {
     return msg_type == NetMsgType::AVAHELLO ||
            msg_type == NetMsgType::AVAPOLL ||
            msg_type == NetMsgType::AVARESPONSE ||
            msg_type == NetMsgType::AVAPROOF ||
            msg_type == NetMsgType::GETAVAADDR ||
            msg_type == NetMsgType::GETAVAPROOFS ||
            msg_type == NetMsgType::AVAPROOFS ||
            msg_type == NetMsgType::AVAPROOFSREQ;
 }
 
 uint32_t
 PeerManagerImpl::GetAvalancheVoteForBlock(const BlockHash &hash) const {
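     // Vote values returned below: 0 (active chain), 1 (invalid), 2 (parked),
     // 3 (fork), -1 (unknown), -2 (missing data), -3 (pending connection or
     // rejection); negative values are converted to uint32_t on return.
 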
     AssertLockHeld(cs_main);
 
     const CBlockIndex *pindex = m_chainman.m_blockman.LookupBlockIndex(hash);
 
     // Unknown block.
     if (!pindex) {
         return -1;
     }
 
     // Invalid block.
     if (pindex->nStatus.isInvalid()) {
         return 1;
     }
 
     // Parked block.
     if (pindex->nStatus.isOnParkedChain()) {
         return 2;
     }
 
     const CBlockIndex *pindexTip = m_chainman.ActiveChain().Tip();
     const CBlockIndex *pindexFork = LastCommonAncestor(pindex, pindexTip);
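 
     // Three cases remain: the block is on the active chain (pindex ==
     // pindexFork), it sits on a branch that forks off below the tip, or it
     // builds directly on top of the tip (pindexFork == pindexTip).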
 
     // Active block.
     if (pindex == pindexFork) {
         return 0;
     }
 
     // Fork block.
     if (pindexFork != pindexTip) {
         return 3;
     }
 
     // Missing block data.
     if (!pindex->nStatus.hasData()) {
         return -2;
     }
 
     // This block is built on top of the tip, we have the data, it
     // is pending connection or rejection.
     return -3;
 }
 
 uint32_t PeerManagerImpl::GetAvalancheVoteForTx(const TxId &id) const {
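     // Vote values returned below: 0 (accepted), 1 (rejected), -2 (orphan),
     // -1 (unknown); negative values are converted to uint32_t on return.
 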
     // Accepted in mempool, or in a recent block
     if (m_mempool.exists(id) ||
         WITH_LOCK(m_recent_confirmed_transactions_mutex,
                   return m_recent_confirmed_transactions.contains(id))) {
         return 0;
     }
 
     // Invalid tx
     if (m_recent_rejects.contains(id)) {
         return 1;
     }
 
     // Orphan tx
     if (m_orphanage.HaveTx(id)) {
         return -2;
     }
 
     // Unknown tx
     return -1;
 }
 
 /**
  * Decide a response for an Avalanche poll about the given proof.
  *
  * @param[in] id   The id of the proof being polled for
  * @return         Our current vote for the proof
  */
-static uint32_t getAvalancheVoteForProof(const avalanche::ProofId &id) {
-    assert(g_avalanche);
-
-    return g_avalanche->withPeerManager([&id](avalanche::PeerManager &pm) {
+static uint32_t getAvalancheVoteForProof(const avalanche::Processor &avalanche,
+                                         const avalanche::ProofId &id) {
+    return avalanche.withPeerManager([&id](avalanche::PeerManager &pm) {
         // Rejected proof
         if (pm.isInvalid(id)) {
             return 1;
         }
 
         // The proof is actively bound to a peer
         if (pm.isBoundToPeer(id)) {
             return 0;
         }
 
         // Unknown proof
         if (!pm.exists(id)) {
             return -1;
         }
 
         // Immature proof
         if (pm.isImmature(id)) {
             return 2;
         }
 
         // Not immature, but in conflict with an actively bound proof
         if (pm.isInConflictingPool(id)) {
             return 3;
         }
 
         // The proof is known, not rejected, not immature and not a conflict,
         // but for some reason unbound. This should not happen if the above
         // pools are managed correctly, but the case is handled for
         // robustness.
         return -2;
     });
 }
 
 void PeerManagerImpl::ProcessBlock(const Config &config, CNode &node,
                                    const std::shared_ptr<const CBlock> &block,
                                    bool force_processing,
                                    bool min_pow_checked) {
     bool new_block{false};
     m_chainman.ProcessNewBlock(block, force_processing, min_pow_checked,
                                &new_block);
     if (new_block) {
         node.m_last_block_time = GetTime<std::chrono::seconds>();
     } else {
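         // The block was not newly accepted: it was either a duplicate or
         // failed validation. Drop the source entry recorded by the message
         // handler so this peer is no longer associated with the block.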
         LOCK(cs_main);
         mapBlockSource.erase(block->GetHash());
     }
 }
 
 void PeerManagerImpl::ProcessMessage(
     const Config &config, CNode &pfrom, const std::string &msg_type,
     CDataStream &vRecv, const std::chrono::microseconds time_received,
     const std::atomic<bool> &interruptMsgProc) {
     AssertLockHeld(g_msgproc_mutex);
 
     LogPrint(BCLog::NETDEBUG, "received: %s (%u bytes) peer=%d\n",
              SanitizeString(msg_type), vRecv.size(), pfrom.GetId());
 
     PeerRef peer = GetPeerRef(pfrom.GetId());
     if (peer == nullptr) {
         return;
     }
 
     if (IsAvalancheMessageType(msg_type)) {
-        if (!g_avalanche) {
+        if (!m_avalanche) {
             LogPrint(BCLog::AVALANCHE,
                      "Avalanche is not initialized, ignoring %s message\n",
                      msg_type);
             return;
         }
 
         if (!isAvalancheEnabled(gArgs)) {
             // If avalanche is not enabled, ignore avalanche messages
             return;
         }
     }
 
     if (msg_type == NetMsgType::VERSION) {
         // Each connection can only send one version message
         if (pfrom.nVersion != 0) {
             Misbehaving(*peer, 1, "redundant version message");
             return;
         }
 
         int64_t nTime;
         CService addrMe;
         uint64_t nNonce = 1;
         ServiceFlags nServices;
         int nVersion;
         std::string cleanSubVer;
         int starting_height = -1;
         bool fRelay = true;
         uint64_t nExtraEntropy = 1;
 
         vRecv >> nVersion >> Using<CustomUintFormatter<8>>(nServices) >> nTime;
         if (nTime < 0) {
             nTime = 0;
         }
         // Ignore the addrMe service bits sent by the peer
         vRecv.ignore(8);
         vRecv >> addrMe;
         if (!pfrom.IsInboundConn()) {
             m_addrman.SetServices(pfrom.addr, nServices);
         }
         if (pfrom.ExpectServicesFromConn() &&
             !HasAllDesirableServiceFlags(nServices)) {
             LogPrint(BCLog::NET,
                      "peer=%d does not offer the expected services "
                      "(%08x offered, %08x expected); disconnecting\n",
                      pfrom.GetId(), nServices,
                      GetDesirableServiceFlags(nServices));
             pfrom.fDisconnect = true;
             return;
         }
 
         if (pfrom.IsAvalancheOutboundConnection() &&
             !(nServices & NODE_AVALANCHE)) {
             LogPrint(
                 BCLog::AVALANCHE,
                 "peer=%d does not offer the avalanche service; disconnecting\n",
                 pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
 
         if (nVersion < MIN_PEER_PROTO_VERSION) {
             // disconnect from peers older than this proto version
             LogPrint(BCLog::NET,
                      "peer=%d using obsolete version %i; disconnecting\n",
                      pfrom.GetId(), nVersion);
             pfrom.fDisconnect = true;
             return;
         }
 
         if (!vRecv.empty()) {
             // The version message includes information about the sending node
             // which we don't use:
             //   - 8 bytes (service bits)
             //   - 16 bytes (ipv6 address)
             //   - 2 bytes (port)
             vRecv.ignore(26);
             vRecv >> nNonce;
         }
         if (!vRecv.empty()) {
             std::string strSubVer;
             vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
             cleanSubVer = SanitizeString(strSubVer);
         }
         if (!vRecv.empty()) {
             vRecv >> starting_height;
         }
         if (!vRecv.empty()) {
             vRecv >> fRelay;
         }
         if (!vRecv.empty()) {
             vRecv >> nExtraEntropy;
         }
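 
         // The trailing fields are optional: each is read only if bytes
         // remain, so peers speaking older protocol versions can omit them
         // and the defaults initialized above are kept.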
         // Disconnect if we connected to ourself
         if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) {
             LogPrintf("connected to self at %s, disconnecting\n",
                       pfrom.addr.ToString());
             pfrom.fDisconnect = true;
             return;
         }
 
         if (pfrom.IsInboundConn() && addrMe.IsRoutable()) {
             SeenLocal(addrMe);
         }
 
         // Inbound peers send us their version message when they connect.
         // We send our version message in response.
         if (pfrom.IsInboundConn()) {
             PushNodeVersion(config, pfrom, *peer);
         }
 
         // Change version
         const int greatest_common_version =
             std::min(nVersion, PROTOCOL_VERSION);
         pfrom.SetCommonVersion(greatest_common_version);
         pfrom.nVersion = nVersion;
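 
         // From here on, everything sent to this peer is serialized with the
         // negotiated common version, the lower of the peer's version and
         // our PROTOCOL_VERSION.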
 
         const CNetMsgMaker msg_maker(greatest_common_version);
 
         m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
 
         // Signal ADDRv2 support (BIP155).
         m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
 
         pfrom.m_has_all_wanted_services =
             HasAllDesirableServiceFlags(nServices);
         peer->m_their_services = nServices;
         pfrom.SetAddrLocal(addrMe);
         {
             LOCK(pfrom.m_subver_mutex);
             pfrom.cleanSubVer = cleanSubVer;
         }
         peer->m_starting_height = starting_height;
 
         // We only initialize the m_tx_relay data structure if:
         // - this isn't an outbound block-relay-only connection; and
         // - fRelay=true or we're offering NODE_BLOOM to this peer
         //   (NODE_BLOOM means that the peer may turn on tx relay later)
         if (!pfrom.IsBlockOnlyConn() &&
             (fRelay || (peer->m_our_services & NODE_BLOOM))) {
             auto *const tx_relay = peer->SetTxRelay();
             {
                 LOCK(tx_relay->m_bloom_filter_mutex);
                 // set to true after we get the first filter* message
                 tx_relay->m_relay_txs = fRelay;
             }
             if (fRelay) {
                 pfrom.m_relays_txs = true;
             }
         }
 
         pfrom.nRemoteHostNonce = nNonce;
         pfrom.nRemoteExtraEntropy = nExtraEntropy;
 
         // Potentially mark this peer as a preferred download peer.
         {
             LOCK(cs_main);
             CNodeState *state = State(pfrom.GetId());
             state->fPreferredDownload =
                 (!pfrom.IsInboundConn() ||
                  pfrom.HasPermission(NetPermissionFlags::NoBan)) &&
                 !pfrom.IsAddrFetchConn() && CanServeBlocks(*peer);
             m_num_preferred_download_peers += state->fPreferredDownload;
         }
 
         // Self advertisement & GETADDR logic
         if (!pfrom.IsInboundConn() && SetupAddressRelay(pfrom, *peer)) {
             // For outbound peers, we try to relay our address (so that other
             // nodes can try to find us more quickly, as we have no guarantee
             // that an outbound peer is even aware of how to reach us) and do a
             // one-time address fetch (to help populate/update our addrman). If
             // we're starting up for the first time, our addrman may be pretty
             // empty and no one will know who we are, so these mechanisms are
             // important to help us connect to the network.
             //
             // We skip this for block-relay-only peers. We want to avoid
             // potentially leaking addr information and we do not want to
             // indicate to the peer that we will participate in addr relay.
             if (fListen &&
                 !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                 CAddress addr{GetLocalAddress(pfrom.addr), peer->m_our_services,
                               AdjustedTime()};
                 FastRandomContext insecure_rand;
                 if (addr.IsRoutable()) {
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     PushAddress(*peer, addr, insecure_rand);
                 } else if (IsPeerAddrLocalGood(&pfrom)) {
                     // Override just the address with whatever the peer sees us
                     // as. Leave the port in addr as it was returned by
                     // GetLocalAddress() above, as this is an outbound
                     // connection and the peer cannot observe our listening
                     // port.
                     addr.SetIP(addrMe);
                     LogPrint(BCLog::NET,
                              "ProcessMessages: advertising address %s\n",
                              addr.ToString());
                     PushAddress(*peer, addr, insecure_rand);
                 }
             }
 
             // Get recent addresses
             m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version)
                                               .Make(NetMsgType::GETADDR));
             peer->m_getaddr_sent = true;
             // When requesting a getaddr, accept an additional MAX_ADDR_TO_SEND
             // addresses in response (bypassing the
             // MAX_ADDR_PROCESSING_TOKEN_BUCKET limit).
             WITH_LOCK(peer->m_addr_token_bucket_mutex,
                       peer->m_addr_token_bucket += GetMaxAddrToSend());
         }
 
         if (!pfrom.IsInboundConn()) {
             // For non-inbound connections, we update the addrman to record
             // connection success so that addrman will have an up-to-date
             // notion of which peers are online and available.
             //
             // While we strive to not leak information about block-relay-only
             // connections via the addrman, not moving an address to the tried
             // table is also potentially detrimental because new-table entries
             // are subject to eviction in the event of addrman collisions.  We
             // mitigate the information-leak by never calling
             // AddrMan::Connected() on block-relay-only peers; see
             // FinalizeNode().
             //
             // This moves an address from New to Tried table in Addrman,
             // resolves tried-table collisions, etc.
             m_addrman.Good(pfrom.addr);
         }
 
         std::string remoteAddr;
         if (fLogIPs) {
             remoteAddr = ", peeraddr=" + pfrom.addr.ToString();
         }
 
         LogPrint(BCLog::NET,
                  "receive version message: [%s] %s: version %d, blocks=%d, "
                  "us=%s, txrelay=%d, peer=%d%s\n",
                  pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion,
                  peer->m_starting_height, addrMe.ToString(), fRelay,
                  pfrom.GetId(), remoteAddr);
 
         int64_t currentTime = GetTime();
         int64_t nTimeOffset = nTime - currentTime;
         pfrom.nTimeOffset = nTimeOffset;
         if (nTime < int64_t(m_chainparams.GenesisBlock().nTime)) {
             // Ignore time offsets that are improbable (before the Genesis
             // block) and may underflow our adjusted time.
             Misbehaving(*peer, 20,
                         "Ignoring invalid timestamp in version message");
         } else if (!pfrom.IsInboundConn()) {
             // Don't use timedata samples from inbound peers to make it
             // harder for others to tamper with our adjusted time.
             AddTimeData(pfrom.addr, nTimeOffset);
         }
 
         // Feeler connections exist only to verify if address is online.
         if (pfrom.IsFeelerConn()) {
             LogPrint(BCLog::NET,
                      "feeler connection completed peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
         }
         return;
     }
 
     if (pfrom.nVersion == 0) {
         // Must have a version message before anything else
         Misbehaving(*peer, 10, "non-version message before version handshake");
         return;
     }
 
     // At this point, the outgoing message serialization version can't change.
     const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
 
     if (msg_type == NetMsgType::VERACK) {
         if (pfrom.fSuccessfullyConnected) {
             LogPrint(BCLog::NET,
                      "ignoring redundant verack message from peer=%d\n",
                      pfrom.GetId());
             return;
         }
 
         if (!pfrom.IsInboundConn()) {
             LogPrintf(
                 "New outbound peer connected: version: %d, blocks=%d, "
                 "peer=%d%s (%s)\n",
                 pfrom.nVersion.load(), peer->m_starting_height, pfrom.GetId(),
                 (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString())
                          : ""),
                 pfrom.ConnectionTypeAsString());
         }
 
         if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) {
             // Tell our peer we are willing to provide version 1
             // cmpctblocks. However, we do not request new block
             // announcements using cmpctblock messages. We send this to
             // non-NODE_NETWORK peers as well, because they may wish to
             // request compact blocks from us.
             m_connman.PushMessage(
                 &pfrom,
                 msgMaker.Make(NetMsgType::SENDCMPCT, /*high_bandwidth=*/false,
                               /*version=*/CMPCTBLOCKS_VERSION));
         }
 
-        if (g_avalanche && isAvalancheEnabled(gArgs)) {
-            if (g_avalanche->sendHello(&pfrom)) {
-                auto localProof = g_avalanche->getLocalProof();
+        if (m_avalanche && isAvalancheEnabled(gArgs)) {
+            if (m_avalanche->sendHello(&pfrom)) {
+                auto localProof = m_avalanche->getLocalProof();
 
                 if (localProof) {
                     AddKnownProof(*peer, localProof->getId());
                     // Add our proof id to the list of proof INVs recently
                     // announced to this peer. This is used for filtering
                     // which INVs can be requested for download.
                     peer->m_proof_relay->m_recently_announced_proofs.insert(
                         localProof->getId());
                 }
             }
         }
 
         pfrom.fSuccessfullyConnected = true;
         return;
     }
 
     if (!pfrom.fSuccessfullyConnected) {
         // Must have a verack message before anything else
         Misbehaving(*peer, 10, "non-verack message before version handshake");
         return;
     }
 
     if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) {
         int stream_version = vRecv.GetVersion();
         if (msg_type == NetMsgType::ADDRV2) {
             // Add ADDRV2_FORMAT to the version so that the CNetAddr and
             // CAddress unserialize methods know that an address in v2 format is
             // coming.
             stream_version |= ADDRV2_FORMAT;
         }
 
         OverrideStream<CDataStream> s(&vRecv, vRecv.GetType(), stream_version);
         std::vector<CAddress> vAddr;
 
         s >> vAddr;
 
         if (!SetupAddressRelay(pfrom, *peer)) {
             LogPrint(BCLog::NET, "ignoring %s message from %s peer=%d\n",
                      msg_type, pfrom.ConnectionTypeAsString(), pfrom.GetId());
             return;
         }
 
         if (vAddr.size() > GetMaxAddrToSend()) {
             Misbehaving(
                 *peer, 20,
                 strprintf("%s message size = %u", msg_type, vAddr.size()));
             return;
         }
 
         // Store the new addresses
         std::vector<CAddress> vAddrOk;
         const auto current_a_time{AdjustedTime()};
 
         // Update/increment addr rate limiting bucket.
         const auto current_time = GetTime<std::chrono::microseconds>();
         {
             LOCK(peer->m_addr_token_bucket_mutex);
             if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
                 // Don't increment bucket if it's already full
                 const auto time_diff =
                     std::max(current_time - peer->m_addr_token_timestamp, 0us);
                 const double increment =
                     CountSecondsDouble(time_diff) * MAX_ADDR_RATE_PER_SECOND;
                 peer->m_addr_token_bucket =
                     std::min<double>(peer->m_addr_token_bucket + increment,
                                      MAX_ADDR_PROCESSING_TOKEN_BUCKET);
             }
         }
         peer->m_addr_token_timestamp = current_time;
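 
         // Each address accepted below consumes one token. With the constants
         // from the upstream addr rate-limiting change (0.1 tokens per
         // second, bucket capped at MAX_ADDR_PROCESSING_TOKEN_BUCKET), a peer
         // may burst a full bucket and is then limited to roughly six
         // addresses per minute, unless it has the Addr permission.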
 
         const bool rate_limited =
             !pfrom.HasPermission(NetPermissionFlags::Addr);
         uint64_t num_proc = 0;
         uint64_t num_rate_limit = 0;
         Shuffle(vAddr.begin(), vAddr.end(), FastRandomContext());
         for (CAddress &addr : vAddr) {
             if (interruptMsgProc) {
                 return;
             }
 
             {
                 LOCK(peer->m_addr_token_bucket_mutex);
                 // Apply rate limiting.
                 if (peer->m_addr_token_bucket < 1.0) {
                     if (rate_limited) {
                         ++num_rate_limit;
                         continue;
                     }
                 } else {
                     peer->m_addr_token_bucket -= 1.0;
                 }
             }
 
             // We only bother storing full nodes, though this may include things
             // which we would not make an outbound connection to, in part
             // because we may make feeler connections to them.
             if (!MayHaveUsefulAddressDB(addr.nServices) &&
                 !HasAllDesirableServiceFlags(addr.nServices)) {
                 continue;
             }
 
             if (addr.nTime <= NodeSeconds{100000000s} ||
                 addr.nTime > current_a_time + 10min) {
                 addr.nTime = current_a_time - 5 * 24h;
             }
             AddAddressKnown(*peer, addr);
             if (m_banman &&
                 (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) {
                 // Do not process banned/discouraged addresses beyond
                 // remembering we received them
                 continue;
             }
             ++num_proc;
             bool fReachable = IsReachable(addr);
             if (addr.nTime > current_a_time - 10min && !peer->m_getaddr_sent &&
                 vAddr.size() <= 10 && addr.IsRoutable()) {
                 // Relay to a limited number of other nodes
                 RelayAddress(pfrom.GetId(), addr, fReachable);
             }
             // Do not store addresses outside our network
             if (fReachable) {
                 vAddrOk.push_back(addr);
             }
         }
         peer->m_addr_processed += num_proc;
         peer->m_addr_rate_limited += num_rate_limit;
         LogPrint(BCLog::NET,
                  "Received addr: %u addresses (%u processed, %u rate-limited) "
                  "from peer=%d\n",
                  vAddr.size(), num_proc, num_rate_limit, pfrom.GetId());
 
         m_addrman.Add(vAddrOk, pfrom.addr, 2h);
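         // The 2 hour argument above applies a time penalty to addresses
         // learned from other peers, so they look less fresh than direct
         // self-announcements; a response of fewer than 1000 addresses is
         // taken below to complete our GETADDR request.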
         if (vAddr.size() < 1000) {
             peer->m_getaddr_sent = false;
         }
 
         // AddrFetch: Require multiple addresses to avoid disconnecting on
         // self-announcements
         if (pfrom.IsAddrFetchConn() && vAddr.size() > 1) {
             LogPrint(BCLog::NET,
                      "addrfetch connection completed peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
         }
         return;
     }
 
     if (msg_type == NetMsgType::SENDADDRV2) {
         peer->m_wants_addrv2 = true;
         return;
     }
 
     if (msg_type == NetMsgType::SENDHEADERS) {
         peer->m_prefers_headers = true;
         return;
     }
 
     if (msg_type == NetMsgType::SENDCMPCT) {
         bool sendcmpct_hb{false};
         uint64_t sendcmpct_version{0};
         vRecv >> sendcmpct_hb >> sendcmpct_version;
 
         if (sendcmpct_version != CMPCTBLOCKS_VERSION) {
             return;
         }
 
         LOCK(cs_main);
         CNodeState *nodestate = State(pfrom.GetId());
         nodestate->m_provides_cmpctblocks = true;
         nodestate->m_requested_hb_cmpctblocks = sendcmpct_hb;
         // save whether peer selects us as BIP152 high-bandwidth peer
         // (receiving sendcmpct(1) signals high-bandwidth,
         // sendcmpct(0) low-bandwidth)
         pfrom.m_bip152_highbandwidth_from = sendcmpct_hb;
         return;
     }
 
     if (msg_type == NetMsgType::INV) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             Misbehaving(*peer, 20,
                         strprintf("inv message size = %u", vInv.size()));
             return;
         }
 
         // Reject tx INVs when the -blocksonly setting is enabled, or this is a
         // block-relay-only peer
         bool reject_tx_invs{m_ignore_incoming_txs || pfrom.IsBlockOnlyConn()};
 
         // Allow peers with relay permission to send data other than blocks
         // in blocks only mode
         if (pfrom.HasPermission(NetPermissionFlags::Relay)) {
             reject_tx_invs = false;
         }
 
         const auto current_time{GetTime<std::chrono::microseconds>()};
         std::optional<BlockHash> best_block;
 
         auto logInv = [&](const CInv &inv, bool fAlreadyHave) {
             LogPrint(BCLog::NET, "got inv: %s  %s peer=%d\n", inv.ToString(),
                      fAlreadyHave ? "have" : "new", pfrom.GetId());
         };
 
         for (CInv &inv : vInv) {
             if (interruptMsgProc) {
                 return;
             }
 
             if (inv.IsMsgBlk()) {
                 LOCK(cs_main);
                 const bool fAlreadyHave = AlreadyHaveBlock(BlockHash(inv.hash));
                 logInv(inv, fAlreadyHave);
 
                 const BlockHash hash{inv.hash};
                 UpdateBlockAvailability(pfrom.GetId(), hash);
                 if (!fAlreadyHave && !m_chainman.m_blockman.LoadingBlocks() &&
                     !IsBlockRequested(hash)) {
                     // Headers-first is the primary method of announcement on
                     // the network. If a node fell back to sending blocks by
                     // inv, it may be for a re-org, or because we haven't
                     // completed initial headers sync. The final block hash
                     // provided should be the highest, so send a getheaders and
                     // then fetch the blocks we need to catch up.
                     best_block = hash;
                 }
 
                 continue;
             }
 
             if (inv.IsMsgProof()) {
                 const avalanche::ProofId proofid(inv.hash);
                 const bool fAlreadyHave = AlreadyHaveProof(proofid);
                 logInv(inv, fAlreadyHave);
                 AddKnownProof(*peer, proofid);
 
-                if (!fAlreadyHave && g_avalanche && isAvalancheEnabled(gArgs) &&
+                if (!fAlreadyHave && m_avalanche && isAvalancheEnabled(gArgs) &&
                     !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                     const bool preferred = isPreferredDownloadPeer(pfrom);
 
                     LOCK(cs_proofrequest);
                     AddProofAnnouncement(pfrom, proofid, current_time,
                                          preferred);
                 }
                 continue;
             }
 
             if (inv.IsMsgTx()) {
                 LOCK(cs_main);
                 const TxId txid(inv.hash);
                 const bool fAlreadyHave = AlreadyHaveTx(txid);
                 logInv(inv, fAlreadyHave);
 
                 AddKnownTx(*peer, txid);
                 if (reject_tx_invs) {
                     LogPrint(BCLog::NET,
                              "transaction (%s) inv sent in violation of "
                              "protocol, disconnecting peer=%d\n",
                              txid.ToString(), pfrom.GetId());
                     pfrom.fDisconnect = true;
                     return;
                 } else if (!fAlreadyHave && !m_chainman.ActiveChainstate()
                                                  .IsInitialBlockDownload()) {
                     AddTxAnnouncement(pfrom, txid, current_time);
                 }
 
                 continue;
             }
 
             LogPrint(BCLog::NET,
                      "Unknown inv type \"%s\" received from peer=%d\n",
                      inv.ToString(), pfrom.GetId());
         }
 
         if (best_block) {
             // If we haven't started initial headers-sync with this peer, then
             // consider sending a getheaders now. On initial startup, there's a
             // reliability vs bandwidth tradeoff, where we are only trying to do
             // initial headers sync with one peer at a time, with a long
             // timeout (at which point, if the sync hasn't completed, we will
             // disconnect the peer and then choose another). In the meantime,
             // as new blocks are found, we are willing to add one new peer per
             // block to sync with as well, to sync quicker in the case where
             // our initial peer is unresponsive (but less bandwidth than we'd
             // use if we turned on sync with all peers).
             LOCK(::cs_main);
             CNodeState &state{*Assert(State(pfrom.GetId()))};
             if (state.fSyncStarted ||
                 (!peer->m_inv_triggered_getheaders_before_sync &&
                  *best_block != m_last_block_inv_triggering_headers_sync)) {
                 if (MaybeSendGetHeaders(
                         pfrom, GetLocator(m_chainman.m_best_header), *peer)) {
                     LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n",
                              m_chainman.m_best_header->nHeight,
                              best_block->ToString(), pfrom.GetId());
                 }
                 if (!state.fSyncStarted) {
                     peer->m_inv_triggered_getheaders_before_sync = true;
                     // Update the last block hash that triggered a new headers
                     // sync, so that we don't turn on headers sync with more
                     // than 1 new peer every new block.
                     m_last_block_inv_triggering_headers_sync = *best_block;
                 }
             }
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::GETDATA) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         if (vInv.size() > MAX_INV_SZ) {
             Misbehaving(*peer, 20,
                         strprintf("getdata message size = %u", vInv.size()));
             return;
         }
 
         LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n",
                  vInv.size(), pfrom.GetId());
 
         if (vInv.size() > 0) {
             LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n",
                      vInv[0].ToString(), pfrom.GetId());
         }
 
         {
             LOCK(peer->m_getdata_requests_mutex);
             peer->m_getdata_requests.insert(peer->m_getdata_requests.end(),
                                             vInv.begin(), vInv.end());
             ProcessGetData(config, pfrom, *peer, interruptMsgProc);
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::GETBLOCKS) {
         CBlockLocator locator;
         uint256 hashStop;
         vRecv >> locator >> hashStop;
 
         if (locator.vHave.size() > MAX_LOCATOR_SZ) {
             LogPrint(BCLog::NET,
                      "getblocks locator size %lld > %d, disconnect peer=%d\n",
                      locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
 
         // We might have announced the currently-being-connected tip using a
         // compact block, which resulted in the peer sending a getblocks
         // request, which we would otherwise respond to without the new block.
         // To avoid this situation we simply verify that we are on our best
         // known chain now. This is super overkill, but we handle it better
         // for getheaders requests, and there are no known nodes which support
         // compact blocks but still use getblocks to request blocks.
         {
             std::shared_ptr<const CBlock> a_recent_block;
             {
                 LOCK(m_most_recent_block_mutex);
                 a_recent_block = m_most_recent_block;
             }
             BlockValidationState state;
             if (!m_chainman.ActiveChainstate().ActivateBestChain(
                     state, a_recent_block)) {
                 LogPrint(BCLog::NET, "failed to activate chain (%s)\n",
                          state.ToString());
             }
         }
 
         LOCK(cs_main);
 
         // Find the last block the caller has in the main chain
         const CBlockIndex *pindex =
             m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
 
         // Send the rest of the chain
         if (pindex) {
             pindex = m_chainman.ActiveChain().Next(pindex);
         }
         int nLimit = 500;
         LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit,
                  pfrom.GetId());
         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
             if (pindex->GetBlockHash() == hashStop) {
                 LogPrint(BCLog::NET, "  getblocks stopping at %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             // If pruning, don't inv blocks unless we have on disk and are
             // likely to still have for some reasonable time window (1 hour)
             // that block relay might require.
             const int nPrunedBlocksLikelyToHave =
                 MIN_BLOCKS_TO_KEEP -
                 3600 / m_chainparams.GetConsensus().nPowTargetSpacing;
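             // e.g. assuming MIN_BLOCKS_TO_KEEP = 288 and 10 minute block
             // spacing, 3600 / 600 = 6 blocks are subtracted, so only blocks
             // within roughly the most recent 282 are announced.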
             if (m_chainman.m_blockman.IsPruneMode() &&
                 (!pindex->nStatus.hasData() ||
                  pindex->nHeight <= m_chainman.ActiveChain().Tip()->nHeight -
                                         nPrunedBlocksLikelyToHave)) {
                 LogPrint(
                     BCLog::NET,
                     " getblocks stopping, pruned or too old block at %d %s\n",
                     pindex->nHeight, pindex->GetBlockHash().ToString());
                 break;
             }
             WITH_LOCK(
                 peer->m_block_inv_mutex,
                 peer->m_blocks_for_inv_relay.push_back(pindex->GetBlockHash()));
             if (--nLimit <= 0) {
                 // When this block is requested, we'll send an inv that'll
                 // trigger the peer to getblocks the next batch of inventory.
                 LogPrint(BCLog::NET, "  getblocks stopping at limit %d %s\n",
                          pindex->nHeight, pindex->GetBlockHash().ToString());
                 WITH_LOCK(peer->m_block_inv_mutex, {
                     peer->m_continuation_block = pindex->GetBlockHash();
                 });
                 break;
             }
         }
         return;
     }
 
     if (msg_type == NetMsgType::GETBLOCKTXN) {
         BlockTransactionsRequest req;
         vRecv >> req;
 
         std::shared_ptr<const CBlock> recent_block;
         {
             LOCK(m_most_recent_block_mutex);
             if (m_most_recent_block_hash == req.blockhash) {
                 recent_block = m_most_recent_block;
             }
             // Unlock m_most_recent_block_mutex to avoid cs_main lock inversion
         }
         if (recent_block) {
             SendBlockTransactions(pfrom, *peer, *recent_block, req);
             return;
         }
 
         {
             LOCK(cs_main);
 
             const CBlockIndex *pindex =
                 m_chainman.m_blockman.LookupBlockIndex(req.blockhash);
             if (!pindex || !pindex->nStatus.hasData()) {
                 LogPrint(
                     BCLog::NET,
                     "Peer %d sent us a getblocktxn for a block we don't have\n",
                     pfrom.GetId());
                 return;
             }
 
             if (pindex->nHeight >=
                 m_chainman.ActiveChain().Height() - MAX_BLOCKTXN_DEPTH) {
                 CBlock block;
                 const bool ret{
                     m_chainman.m_blockman.ReadBlockFromDisk(block, *pindex)};
                 assert(ret);
 
                 SendBlockTransactions(pfrom, *peer, block, req);
                 return;
             }
         }
 
         // If an older block is requested (should never happen in practice,
         // but can happen in tests) send a block response instead of a
         // blocktxn response. Sending a full block response instead of a
         // small blocktxn response is preferable in the case where a peer
         // might maliciously send lots of getblocktxn requests to trigger
         // expensive disk reads, because it will require the peer to
         // actually receive all the data read from disk over the network.
         LogPrint(BCLog::NET,
                  "Peer %d sent us a getblocktxn for a block > %i deep\n",
                  pfrom.GetId(), MAX_BLOCKTXN_DEPTH);
         CInv inv;
         inv.type = MSG_BLOCK;
         inv.hash = req.blockhash;
         WITH_LOCK(peer->m_getdata_requests_mutex,
                   peer->m_getdata_requests.push_back(inv));
         // The message processing loop will go around again (without pausing)
         // and we'll respond then (without cs_main)
         return;
     }
 
     if (msg_type == NetMsgType::GETHEADERS) {
         CBlockLocator locator;
         BlockHash hashStop;
         vRecv >> locator >> hashStop;
 
         if (locator.vHave.size() > MAX_LOCATOR_SZ) {
             LogPrint(BCLog::NET,
                      "getheaders locator size %lld > %d, disconnect peer=%d\n",
                      locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
 
         if (m_chainman.m_blockman.LoadingBlocks()) {
             LogPrint(
                 BCLog::NET,
                 "Ignoring getheaders from peer=%d while importing/reindexing\n",
                 pfrom.GetId());
             return;
         }
 
         LOCK(cs_main);
 
         // Note that if we were to be on a chain that forks from the
         // checkpointed chain, then serving those headers to a peer that has
         // seen the checkpointed chain would cause that peer to disconnect us.
         // Requiring that our chainwork exceed the minimum chainwork is a
         // protection against being fed a bogus chain when we started up for
         // the first time and getting partitioned off the honest network for
         // serving that chain to others.
         if (m_chainman.ActiveTip() == nullptr ||
             (m_chainman.ActiveTip()->nChainWork <
                  m_chainman.MinimumChainWork() &&
              !pfrom.HasPermission(NetPermissionFlags::Download))) {
             LogPrint(BCLog::NET,
                      "Ignoring getheaders from peer=%d because active chain "
                      "has too little work; sending empty response\n",
                      pfrom.GetId());
             // Just respond with an empty headers message, to tell the peer to
             // go away but not treat us as unresponsive.
             m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS,
                                                         std::vector<CBlock>()));
             return;
         }
 
         CNodeState *nodestate = State(pfrom.GetId());
         const CBlockIndex *pindex = nullptr;
         if (locator.IsNull()) {
             // If locator is null, return the hashStop block
             pindex = m_chainman.m_blockman.LookupBlockIndex(hashStop);
             if (!pindex) {
                 return;
             }
 
             if (!BlockRequestAllowed(pindex)) {
                 LogPrint(BCLog::NET,
                          "%s: ignoring request from peer=%i for old block "
                          "header that isn't in the main chain\n",
                          __func__, pfrom.GetId());
                 return;
             }
         } else {
             // Find the last block the caller has in the main chain
             pindex =
                 m_chainman.ActiveChainstate().FindForkInGlobalIndex(locator);
             if (pindex) {
                 pindex = m_chainman.ActiveChain().Next(pindex);
             }
         }
 
         // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx
         // count at the end
         std::vector<CBlock> vHeaders;
         int nLimit = MAX_HEADERS_RESULTS;
         LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n",
                  (pindex ? pindex->nHeight : -1),
                  hashStop.IsNull() ? "end" : hashStop.ToString(),
                  pfrom.GetId());
         for (; pindex; pindex = m_chainman.ActiveChain().Next(pindex)) {
             vHeaders.push_back(pindex->GetBlockHeader());
             if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) {
                 break;
             }
         }
         // pindex can be nullptr either if we sent
         // m_chainman.ActiveChain().Tip() OR if our peer has
         // m_chainman.ActiveChain().Tip() (and thus we are sending an empty
         // headers message). In both cases it's safe to update
         // pindexBestHeaderSent to be our tip.
         //
         // It is important that we simply reset the BestHeaderSent value here,
         // and not max(BestHeaderSent, newHeaderSent). We might have announced
         // the currently-being-connected tip using a compact block, which
         // resulted in the peer sending a headers request, which we respond to
         // without the new block. By resetting the BestHeaderSent, we ensure we
         // will re-announce the new block via headers (or compact blocks again)
         // in the SendMessages logic.
         nodestate->pindexBestHeaderSent =
             pindex ? pindex : m_chainman.ActiveChain().Tip();
         m_connman.PushMessage(&pfrom,
                               msgMaker.Make(NetMsgType::HEADERS, vHeaders));
         return;
     }
 
     if (msg_type == NetMsgType::TX) {
         // Stop processing the transaction early if
         // 1) We are in blocks only mode and peer has no relay permission; OR
         // 2) This peer is a block-relay-only peer
         if ((m_ignore_incoming_txs &&
              !pfrom.HasPermission(NetPermissionFlags::Relay)) ||
             pfrom.IsBlockOnlyConn()) {
             LogPrint(BCLog::NET,
                      "transaction sent in violation of protocol peer=%d\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
 
         CTransactionRef ptx;
         vRecv >> ptx;
         const CTransaction &tx = *ptx;
         const TxId &txid = tx.GetId();
         AddKnownTx(*peer, txid);
 
         LOCK(cs_main);
 
         m_txrequest.ReceivedResponse(pfrom.GetId(), txid);
 
         if (AlreadyHaveTx(txid)) {
             if (pfrom.HasPermission(NetPermissionFlags::ForceRelay)) {
                 // Always relay transactions received from peers with
                 // forcerelay permission, even if they were already in the
                 // mempool, allowing the node to function as a gateway for
                 // nodes hidden behind it.
                 if (!m_mempool.exists(tx.GetId())) {
                     LogPrintf("Not relaying non-mempool transaction %s from "
                               "forcerelay peer=%d\n",
                               tx.GetId().ToString(), pfrom.GetId());
                 } else {
                     LogPrintf("Force relaying tx %s from peer=%d\n",
                               tx.GetId().ToString(), pfrom.GetId());
                     RelayTransaction(tx.GetId());
                 }
             }
             return;
         }
 
         const MempoolAcceptResult result = m_chainman.ProcessTransaction(ptx);
         const TxValidationState &state = result.m_state;
 
         if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
             // As this version of the transaction was acceptable, we can forget
             // about any requests for it.
             m_txrequest.ForgetInvId(tx.GetId());
             RelayTransaction(tx.GetId());
             m_orphanage.AddChildrenToWorkSet(tx);
 
             pfrom.m_last_tx_time = GetTime<std::chrono::seconds>();
 
             LogPrint(BCLog::MEMPOOL,
                      "AcceptToMemoryPool: peer=%d: accepted %s "
                      "(poolsz %u txn, %u kB)\n",
                      pfrom.GetId(), tx.GetId().ToString(), m_mempool.size(),
                      m_mempool.DynamicMemoryUsage() / 1000);
         } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) {
             // It may be the case that the orphan's parents have all been
             // rejected.
             bool fRejectedParents = false;
 
             // Deduplicate parent txids, so that we don't have to loop over
             // the same parent txid more than once down below.
             std::vector<TxId> unique_parents;
             unique_parents.reserve(tx.vin.size());
             for (const CTxIn &txin : tx.vin) {
                 // We start with all parents, and then remove duplicates below.
                 unique_parents.push_back(txin.prevout.GetTxId());
             }
             std::sort(unique_parents.begin(), unique_parents.end());
             unique_parents.erase(
                 std::unique(unique_parents.begin(), unique_parents.end()),
                 unique_parents.end());
             for (const TxId &parent_txid : unique_parents) {
                 if (m_recent_rejects.contains(parent_txid)) {
                     fRejectedParents = true;
                     break;
                 }
             }
             if (!fRejectedParents) {
                 const auto current_time{GetTime<std::chrono::microseconds>()};
 
                 for (const TxId &parent_txid : unique_parents) {
                     // FIXME: MSG_TX should use a TxHash, not a TxId.
                     AddKnownTx(*peer, parent_txid);
                     if (!AlreadyHaveTx(parent_txid)) {
                         AddTxAnnouncement(pfrom, parent_txid, current_time);
                     }
                 }
 
                 if (m_orphanage.AddTx(ptx, pfrom.GetId())) {
                     AddToCompactExtraTransactions(ptx);
                 }
 
                 // Once added to the orphan pool, a tx is considered
                 // AlreadyHave, and we shouldn't request it anymore.
                 m_txrequest.ForgetInvId(tx.GetId());
 
                 // DoS prevention: do not allow m_orphanage to grow
                 // unbounded (see CVE-2012-3789)
                 unsigned int nMaxOrphanTx = (unsigned int)std::max(
                     int64_t(0),
                     gArgs.GetIntArg("-maxorphantx",
                                     DEFAULT_MAX_ORPHAN_TRANSACTIONS));
                 unsigned int nEvicted = m_orphanage.LimitOrphans(nMaxOrphanTx);
                 if (nEvicted > 0) {
                     LogPrint(BCLog::MEMPOOL,
                              "orphanage overflow, removed %u tx\n", nEvicted);
                 }
             } else {
                 LogPrint(BCLog::MEMPOOL,
                          "not keeping orphan with rejected parents %s\n",
                          tx.GetId().ToString());
                 // We will continue to reject this tx since it has rejected
                 // parents, so avoid re-requesting it from other peers.
                 m_recent_rejects.insert(tx.GetId());
                 m_txrequest.ForgetInvId(tx.GetId());
             }
         } else {
             m_recent_rejects.insert(tx.GetId());
             m_txrequest.ForgetInvId(tx.GetId());
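 
             // Although the tx was rejected, keep small ones around for
             // compact block reconstruction, since a peer's block may still
             // include them (they are added to vExtraTxnForCompact, which is
             // consulted when filling in compact blocks).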
 
             if (RecursiveDynamicUsage(*ptx) < 100000) {
                 AddToCompactExtraTransactions(ptx);
             }
         }
 
         // If a tx has been detected by m_recent_rejects, we will have reached
         // this point and the tx will have been ignored. Because we haven't
         // submitted the tx to our mempool, we won't have computed a DoS
         // score for it or determined exactly why we consider it invalid.
         //
         // This means we won't penalize any peer subsequently relaying a DoSy
         // tx (even if we penalized the first peer who gave it to us) because
         // we have to account for m_recent_rejects showing false positives. In
         // other words, we shouldn't penalize a peer if we aren't *sure* they
         // submitted a DoSy tx.
         //
         // Note that m_recent_rejects doesn't just record DoSy or invalid
         // transactions, but any tx not accepted by the mempool, which may be
         // due to node policy (vs. consensus). So we can't blanket penalize a
         // peer simply for relaying a tx that our m_recent_rejects has caught,
         // regardless of false positives.
 
         if (state.IsInvalid()) {
             LogPrint(BCLog::MEMPOOLREJ,
                      "%s from peer=%d was not accepted: %s\n",
                      tx.GetHash().ToString(), pfrom.GetId(), state.ToString());
             MaybePunishNodeForTx(pfrom.GetId(), state);
         }
         return;
     }
 
     if (msg_type == NetMsgType::CMPCTBLOCK) {
         // Ignore cmpctblock received while importing
         if (m_chainman.m_blockman.LoadingBlocks()) {
             LogPrint(BCLog::NET,
                      "Unexpected cmpctblock message received from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         CBlockHeaderAndShortTxIDs cmpctblock;
         try {
             vRecv >> cmpctblock;
         } catch (std::ios_base::failure &e) {
             // This block has non-contiguous or overflowing indexes
             Misbehaving(*peer, 100, "cmpctblock-bad-indexes");
             return;
         }
 
         bool received_new_header = false;
 
         {
             LOCK(cs_main);
 
             const CBlockIndex *prev_block =
                 m_chainman.m_blockman.LookupBlockIndex(
                     cmpctblock.header.hashPrevBlock);
             if (!prev_block) {
                 // The header doesn't connect (or is genesis); instead of
                 // DoSing in AcceptBlockHeader, request deeper headers.
                 if (!m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                     MaybeSendGetHeaders(
                         pfrom, GetLocator(m_chainman.m_best_header), *peer);
                 }
                 return;
             }
             if (prev_block->nChainWork +
                     CalculateHeadersWork({cmpctblock.header}) <
                 GetAntiDoSWorkThreshold()) {
                 // If we get a low-work header in a compact block, we can ignore
                 // it.
                 LogPrint(BCLog::NET,
                          "Ignoring low-work compact block from peer %d\n",
                          pfrom.GetId());
                 return;
             }
 
             if (!m_chainman.m_blockman.LookupBlockIndex(
                     cmpctblock.header.GetHash())) {
                 received_new_header = true;
             }
         }
 
         const CBlockIndex *pindex = nullptr;
         BlockValidationState state;
         if (!m_chainman.ProcessNewBlockHeaders({cmpctblock.header},
                                                /*min_pow_checked=*/true, state,
                                                &pindex)) {
             if (state.IsInvalid()) {
                 MaybePunishNodeForBlock(pfrom.GetId(), state,
                                         /*via_compact_block*/ true,
                                         "invalid header via cmpctblock");
                 return;
             }
         }
 
         // When we succeed in decoding a block's txids from a cmpctblock
         // message we typically jump to the BLOCKTXN handling code, with a
         // dummy (empty) BLOCKTXN message, to re-use the logic there in
         // completing processing of the putative block (without cs_main).
         bool fProcessBLOCKTXN = false;
         CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
 
         // If we end up treating this as a plain headers message, call that
         // as well without cs_main.
         bool fRevertToHeaderProcessing = false;
 
         // Keep a CBlock for "optimistic" compactblock reconstructions (see
         // below)
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockReconstructed = false;
 
         {
             LOCK(cs_main);
             // If AcceptBlockHeader returned true, it set pindex
             assert(pindex);
             UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash());
 
             CNodeState *nodestate = State(pfrom.GetId());
 
             // If this was a new header with more work than our tip, update the
             // peer's last block announcement time
             if (received_new_header &&
                 pindex->nChainWork >
                     m_chainman.ActiveChain().Tip()->nChainWork) {
                 nodestate->m_last_block_announcement = GetTime();
             }
 
             std::map<BlockHash,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator blockInFlightIt =
                     mapBlocksInFlight.find(pindex->GetBlockHash());
             bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
 
             if (pindex->nStatus.hasData()) {
                 // Nothing to do here
                 return;
             }
 
             if (pindex->nChainWork <=
                     m_chainman.ActiveChain()
                         .Tip()
                         ->nChainWork || // We know something better
                 pindex->nTx != 0) {
                 // We had this block at some point, but pruned it
                 if (fAlreadyInFlight) {
                     // We requested this block for some reason, but our
                     // mempool will probably be useless, so we just grab the
                     // block via normal getdata.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                     m_connman.PushMessage(
                         &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                 }
                 return;
             }
 
             // If we're not close to tip yet, give up and let parallel block
             // fetch work its magic.
             if (!fAlreadyInFlight && !CanDirectFetch()) {
                 return;
             }
 
             // We want to be a bit conservative just to be extra careful about
             // DoS possibilities in compact block processing...
             if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
                 if ((!fAlreadyInFlight && nodestate->nBlocksInFlight <
                                               MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
                     (fAlreadyInFlight &&
                      blockInFlightIt->second.first == pfrom.GetId())) {
                     std::list<QueuedBlock>::iterator *queuedBlockIt = nullptr;
                     if (!BlockRequested(config, pfrom.GetId(), *pindex,
                                         &queuedBlockIt)) {
                         if (!(*queuedBlockIt)->partialBlock) {
                             (*queuedBlockIt)
                                 ->partialBlock.reset(
                                     new PartiallyDownloadedBlock(config,
                                                                  &m_mempool));
                         } else {
                             // The block was already in flight using compact
                             // blocks from the same peer.
                             LogPrint(BCLog::NET, "Peer sent us compact block "
                                                  "we were already syncing!\n");
                             return;
                         }
                     }
 
                     PartiallyDownloadedBlock &partialBlock =
                         *(*queuedBlockIt)->partialBlock;
                     ReadStatus status =
                         partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status == READ_STATUS_INVALID) {
                         // Reset in-flight state in case Misbehaving does not
                         // result in a disconnect
                         RemoveBlockRequest(pindex->GetBlockHash());
                         Misbehaving(*peer, 100, "invalid compact block");
                         return;
                     } else if (status == READ_STATUS_FAILED) {
                         // Duplicate txindices, the block is now in-flight, so
                         // just request it.
                         std::vector<CInv> vInv(1);
                         vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                         m_connman.PushMessage(
                             &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                         return;
                     }
 
                     BlockTransactionsRequest req;
                     for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
                         if (!partialBlock.IsTxAvailable(i)) {
                             req.indices.push_back(i);
                         }
                     }
                     if (req.indices.empty()) {
                         // Dirty hack to jump to the BLOCKTXN code (TODO: move
                         // message handlers into their own functions)
                         BlockTransactions txn;
                         txn.blockhash = cmpctblock.header.GetHash();
                         blockTxnMsg << txn;
                         fProcessBLOCKTXN = true;
                     } else {
                         req.blockhash = pindex->GetBlockHash();
                         m_connman.PushMessage(
                             &pfrom,
                             msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
                     }
                 } else {
                     // This block is either already in flight from a different
                     // peer, or this peer has too many blocks outstanding to
                     // download from. Optimistically try to reconstruct anyway
                     // since we might be able to without any round trips.
                     PartiallyDownloadedBlock tempBlock(config, &m_mempool);
                     ReadStatus status =
                         tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
                     if (status != READ_STATUS_OK) {
                         // TODO: don't ignore failures
                         return;
                     }
                     std::vector<CTransactionRef> dummy;
                     status = tempBlock.FillBlock(*pblock, dummy);
                     if (status == READ_STATUS_OK) {
                         fBlockReconstructed = true;
                     }
                 }
             } else {
                 if (fAlreadyInFlight) {
                     // We requested this block, but it's far into the future,
                     // so our mempool will probably be useless - request the
                     // block normally.
                     std::vector<CInv> vInv(1);
                     vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
                     m_connman.PushMessage(
                         &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
                     return;
                 } else {
                     // If this was an announce-cmpctblock, we want the same
                     // treatment as a header message.
                     fRevertToHeaderProcessing = true;
                 }
             }
         } // cs_main
 
         if (fProcessBLOCKTXN) {
             return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN,
                                   blockTxnMsg, time_received, interruptMsgProc);
         }
 
         if (fRevertToHeaderProcessing) {
             // Headers received from HB compact block peers are permitted to be
             // relayed before full validation (see BIP 152), so we don't want to
             // disconnect the peer if the header turns out to be for an invalid
             // block. Note that if a peer tries to build on an invalid chain,
             // that will be detected and the peer will be banned.
             return ProcessHeadersMessage(config, pfrom, *peer,
                                          {cmpctblock.header},
                                          /*via_compact_block=*/true);
         }
 
         if (fBlockReconstructed) {
             // If we got here, we were able to optimistically reconstruct a
             // block that is in flight from some other peer.
             {
                 LOCK(cs_main);
                 mapBlockSource.emplace(pblock->GetHash(),
                                        std::make_pair(pfrom.GetId(), false));
             }
             // Setting force_processing to true means that we bypass some of
             // our anti-DoS protections in AcceptBlock, which filters
             // unrequested blocks that might be trying to waste our resources
             // (eg disk space). Because we only try to reconstruct blocks when
             // we're close to caught up (via the CanDirectFetch() requirement
             // above, combined with the behavior of not requesting blocks until
             // we have a chain with at least the minimum chain work), and we
             // ignore compact blocks with less work than our tip, it is safe to
             // treat reconstructed compact blocks as having been requested.
             ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
                          /*min_pow_checked=*/true);
             // hold cs_main for CBlockIndex::IsValid()
             LOCK(cs_main);
             if (pindex->IsValid(BlockValidity::TRANSACTIONS)) {
                 // Clear download state for this block, which is in process from
                 // some other peer. We do this after calling ProcessNewBlock so
                 // that a malleated cmpctblock announcement can't be used to
                 // interfere with block relay.
                 RemoveBlockRequest(pblock->GetHash());
             }
         }
         return;
     }
 
     if (msg_type == NetMsgType::BLOCKTXN) {
         // Ignore blocktxn received while importing
         if (m_chainman.m_blockman.LoadingBlocks()) {
             LogPrint(BCLog::NET,
                      "Unexpected blocktxn message received from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         BlockTransactions resp;
         vRecv >> resp;
 
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         bool fBlockRead = false;
         {
             LOCK(cs_main);
 
             std::map<BlockHash,
                      std::pair<NodeId, std::list<QueuedBlock>::iterator>>::
                 iterator it = mapBlocksInFlight.find(resp.blockhash);
             if (it == mapBlocksInFlight.end() ||
                 !it->second.second->partialBlock ||
                 it->second.first != pfrom.GetId()) {
                 LogPrint(BCLog::NET,
                          "Peer %d sent us block transactions for block "
                          "we weren't expecting\n",
                          pfrom.GetId());
                 return;
             }
 
             PartiallyDownloadedBlock &partialBlock =
                 *it->second.second->partialBlock;
             ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
             if (status == READ_STATUS_INVALID) {
                 // Reset in-flight state in case Misbehaving does not
                 // result in a disconnect.
                 RemoveBlockRequest(resp.blockhash);
                 Misbehaving(
                     *peer, 100,
                     "invalid compact block/non-matching block transactions");
                 return;
             } else if (status == READ_STATUS_FAILED) {
                 // Might have collided, fall back to getdata now :(
                 std::vector<CInv> invs;
                 invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
                 m_connman.PushMessage(&pfrom,
                                       msgMaker.Make(NetMsgType::GETDATA, invs));
             } else {
                 // Block is either okay, or possibly we received
                 // READ_STATUS_CHECKBLOCK_FAILED.
                 // Note that CheckBlock can only fail for one of a few reasons:
                 // 1. bad-proof-of-work (impossible here, because we've already
                 //    accepted the header)
                 // 2. merkleroot doesn't match the transactions given (already
                 //    caught in FillBlock with READ_STATUS_FAILED, so
                 //    impossible here)
                 // 3. the block is otherwise invalid (eg invalid coinbase,
                 //    block is too big, too many sigChecks, etc).
                 // So if CheckBlock failed, #3 is the only possibility.
                 // Under BIP 152, we don't DoS-ban unless proof of work is
                 // invalid (we don't require all the stateless checks to have
                 // been run). This is handled below, so just treat this as
                 // though the block was successfully read, and rely on the
                 // handling in ProcessNewBlock to ensure the block index is
                 // updated, etc.
 
                 // RemoveBlockRequest frees the in-flight entry, so the
                 // partialBlock reference above is now an empty pointer and
                 // must not be used again.
                 RemoveBlockRequest(resp.blockhash);
                 fBlockRead = true;
                 // mapBlockSource is used for potentially punishing peers and
                 // updating which peers send us compact blocks, so the race
                 // between here and cs_main in ProcessNewBlock is fine.
                 // BIP 152 permits peers to relay compact blocks after
                 // validating the header only; we should not punish peers
                 // if the block turns out to be invalid.
                 mapBlockSource.emplace(resp.blockhash,
                                        std::make_pair(pfrom.GetId(), false));
             }
         } // Don't hold cs_main when we call into ProcessNewBlock
         if (fBlockRead) {
             // Since we requested this block (it was in mapBlocksInFlight),
             // force it to be processed, even if it would not be a candidate for
             // new tip (missing previous block, chain not long enough, etc)
             // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
             // disk-space attacks), but this should be safe due to the
             // protections in the compact block handler -- see related comment
             // in compact block optimistic reconstruction handling.
             ProcessBlock(config, pfrom, pblock, /*force_processing=*/true,
                          /*min_pow_checked=*/true);
         }
         return;
     }
 
     if (msg_type == NetMsgType::HEADERS) {
         // Ignore headers received while importing
         if (m_chainman.m_blockman.LoadingBlocks()) {
             LogPrint(BCLog::NET,
                      "Unexpected headers message received from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         // Assume that this is in response to any outstanding getheaders
         // request we may have sent, and clear out the time of our last request
         peer->m_last_getheaders_timestamp = {};
 
         std::vector<CBlockHeader> headers;
 
         // Bypass the normal CBlock deserialization, as we don't want to risk
         // deserializing 2000 full blocks.
         unsigned int nCount = ReadCompactSize(vRecv);
         if (nCount > MAX_HEADERS_RESULTS) {
             Misbehaving(*peer, 20,
                         strprintf("too-many-headers: headers message size = %u",
                                   nCount));
             return;
         }
         headers.resize(nCount);
         for (unsigned int n = 0; n < nCount; n++) {
             vRecv >> headers[n];
             // Ignore tx count; assume it is 0.
             ReadCompactSize(vRecv);
         }
 
         ProcessHeadersMessage(config, pfrom, *peer, std::move(headers),
                               /*via_compact_block=*/false);
 
         // Check if the headers presync progress needs to be reported to
         // validation. This needs to be done without holding the
         // m_headers_presync_mutex lock.
         if (m_headers_presync_should_signal.exchange(false)) {
             HeadersPresyncStats stats;
             {
                 LOCK(m_headers_presync_mutex);
                 auto it =
                     m_headers_presync_stats.find(m_headers_presync_bestpeer);
                 if (it != m_headers_presync_stats.end()) {
                     stats = it->second;
                 }
             }
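             // stats.second, when set, holds the presynced chain's current
             // height and last block time for the best peer.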
             if (stats.second) {
                 m_chainman.ReportHeadersPresync(
                     stats.first, stats.second->first, stats.second->second);
             }
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::BLOCK) {
         // Ignore block received while importing
         if (m_chainman.m_blockman.LoadingBlocks()) {
             LogPrint(BCLog::NET,
                      "Unexpected block message received from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
         vRecv >> *pblock;
 
         LogPrint(BCLog::NET, "received block %s peer=%d\n",
                  pblock->GetHash().ToString(), pfrom.GetId());
 
         // Process all blocks from whitelisted peers, even if not requested,
         // unless we're still syncing with the network. Such an unrequested
         // block may still be processed, subject to the conditions in
         // AcceptBlock().
         bool forceProcessing =
             pfrom.HasPermission(NetPermissionFlags::NoBan) &&
             !m_chainman.ActiveChainstate().IsInitialBlockDownload();
         const BlockHash hash = pblock->GetHash();
         bool min_pow_checked = false;
         {
             LOCK(cs_main);
             // Always process the block if we requested it, since we may
             // need it even when it's not a candidate for a new best tip.
             forceProcessing = IsBlockRequested(hash);
             RemoveBlockRequest(hash);
             // mapBlockSource is only used for punishing peers and setting
             // which peers send us compact blocks, so the race between here and
             // cs_main in ProcessNewBlock is fine.
             mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true));
 
             // Check work on this block against our anti-DoS thresholds.
             const CBlockIndex *prev_block =
                 m_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock);
             if (prev_block &&
                 prev_block->nChainWork +
                         CalculateHeadersWork({pblock->GetBlockHeader()}) >=
                     GetAntiDoSWorkThreshold()) {
                 min_pow_checked = true;
             }
         }
         ProcessBlock(config, pfrom, pblock, forceProcessing, min_pow_checked);
         return;
     }
 
     if (msg_type == NetMsgType::AVAHELLO) {
-        if (!g_avalanche) {
+        if (!m_avalanche) {
             return;
         }
         {
             LOCK(pfrom.cs_avalanche_pubkey);
             if (pfrom.m_avalanche_pubkey.has_value()) {
                 LogPrint(
                     BCLog::AVALANCHE,
                     "Ignoring avahello from peer %d: already in our node set\n",
                     pfrom.GetId());
                 return;
             }
 
             avalanche::Delegation delegation;
             vRecv >> delegation;
 
             // A delegation with an all zero limited id indicates that the peer
             // has no proof, so we're done.
             if (delegation.getLimitedProofId() != uint256::ZERO) {
                 avalanche::DelegationState state;
                 CPubKey pubkey;
                 if (!delegation.verify(state, pubkey)) {
                     Misbehaving(*peer, 100, "invalid-delegation");
                     return;
                 }
                 pfrom.m_avalanche_pubkey = std::move(pubkey);
 
                 CHashWriter sighasher(SER_GETHASH, 0);
                 sighasher << delegation.getId();
                 sighasher << pfrom.nRemoteHostNonce;
                 sighasher << pfrom.GetLocalNonce();
                 sighasher << pfrom.nRemoteExtraEntropy;
                 sighasher << pfrom.GetLocalExtraEntropy();
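                 // The signed hash commits to the delegation id and to both
                 // peers' nonces and extra entropy, tying the avahello to
                 // this specific connection.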
 
                 SchnorrSig sig;
                 vRecv >> sig;
                 if (!(*pfrom.m_avalanche_pubkey)
                          .VerifySchnorr(sighasher.GetHash(), sig)) {
                     Misbehaving(*peer, 100, "invalid-avahello-signature");
                     return;
                 }
 
                 // If we don't know this proof already, add it to the tracker so
                 // it can be requested.
                 const avalanche::ProofId proofid(delegation.getProofId());
                 if (!AlreadyHaveProof(proofid)) {
                     const bool preferred = isPreferredDownloadPeer(pfrom);
                     LOCK(cs_proofrequest);
                     AddProofAnnouncement(pfrom, proofid,
                                          GetTime<std::chrono::microseconds>(),
                                          preferred);
                 }
 
                 // Don't check the return value. If it fails we probably don't
                 // know about the proof yet.
-                g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
+                m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
                     return pm.addNode(pfrom.GetId(), proofid);
                 });
             }
 
             pfrom.m_avalanche_enabled = true;
         }
 
         // Send getavaaddr and getavaproofs to our avalanche outbound or
         // manual connections
         if (!pfrom.IsInboundConn()) {
             m_connman.PushMessage(&pfrom,
                                   msgMaker.Make(NetMsgType::GETAVAADDR));
             WITH_LOCK(peer->m_addr_token_bucket_mutex,
                       peer->m_addr_token_bucket += GetMaxAddrToSend());
 
             if (peer->m_proof_relay &&
                 !m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
                 m_connman.PushMessage(&pfrom,
                                       msgMaker.Make(NetMsgType::GETAVAPROOFS));
                 peer->m_proof_relay->compactproofs_requested = true;
             }
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::AVAPOLL) {
-        if (!g_avalanche) {
+        if (!m_avalanche) {
             return;
         }
         const auto now = Now<SteadyMilliseconds>();
         const int64_t cooldown =
             gArgs.GetIntArg("-avacooldown", AVALANCHE_DEFAULT_COOLDOWN);
 
         const auto last_poll = pfrom.m_last_poll;
         pfrom.m_last_poll = now;
 
         if (now < last_poll + std::chrono::milliseconds(cooldown)) {
             LogPrint(BCLog::AVALANCHE,
                      "Ignoring repeated avapoll from peer %d: cooldown not "
                      "elapsed\n",
                      pfrom.GetId());
             return;
         }
 
-        const bool quorum_established = g_avalanche->isQuorumEstablished();
+        const bool quorum_established = m_avalanche->isQuorumEstablished();
 
         uint64_t round;
         Unserialize(vRecv, round);
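         // The round is echoed back in the response below so the poller can
         // match these votes to its outstanding query.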
 
         unsigned int nCount = ReadCompactSize(vRecv);
         if (nCount > AVALANCHE_MAX_ELEMENT_POLL) {
             Misbehaving(
                 *peer, 20,
                 strprintf("too-many-ava-poll: poll message size = %u", nCount));
             return;
         }
 
         std::vector<avalanche::Vote> votes;
         votes.reserve(nCount);
 
         for (unsigned int n = 0; n < nCount; n++) {
             CInv inv;
             vRecv >> inv;
 
             // Default vote for unknown inv type
             uint32_t vote = -1;
 
             // We don't vote definitively until we have an established quorum
             if (!quorum_established) {
                 votes.emplace_back(vote, inv.hash);
                 continue;
             }
 
             // If inv's type is known, get a vote for its hash
             switch (inv.type) {
                 case MSG_TX: {
                     if (gArgs.GetBoolArg("-avalanchepreconsensus",
                                          DEFAULT_AVALANCHE_PRECONSENSUS)) {
                         vote = WITH_LOCK(cs_main, return GetAvalancheVoteForTx(
                                                       TxId(inv.hash)));
                     }
                 } break;
                 case MSG_BLOCK: {
                     vote = WITH_LOCK(cs_main, return GetAvalancheVoteForBlock(
                                                   BlockHash(inv.hash)));
                 } break;
                 case MSG_AVA_PROOF: {
-                    vote =
-                        getAvalancheVoteForProof(avalanche::ProofId(inv.hash));
+                    vote = getAvalancheVoteForProof(
+                        *m_avalanche, avalanche::ProofId(inv.hash));
                 } break;
                 default: {
                     LogPrint(BCLog::AVALANCHE,
                              "poll inv type %d unknown from peer=%d\n",
                              inv.type, pfrom.GetId());
                 }
             }
 
             votes.emplace_back(vote, inv.hash);
         }
 
         // Send the query to the node.
-        g_avalanche->sendResponse(
+        m_avalanche->sendResponse(
             &pfrom, avalanche::Response(round, cooldown, std::move(votes)));
         return;
     }
 
     if (msg_type == NetMsgType::AVARESPONSE) {
-        if (!g_avalanche) {
+        if (!m_avalanche) {
             return;
         }
         // As long as QUIC is not implemented, we need to sign responses and
         // verify their signatures in order to prevent any manipulation of
         // the messages at the transport level.
         CHashVerifier<CDataStream> verifier(&vRecv);
         avalanche::Response response;
         verifier >> response;
 
         SchnorrSig sig;
         vRecv >> sig;
 
         {
             LOCK(pfrom.cs_avalanche_pubkey);
             if (!pfrom.m_avalanche_pubkey.has_value() ||
                 !(*pfrom.m_avalanche_pubkey)
                      .VerifySchnorr(verifier.GetHash(), sig)) {
                 Misbehaving(*peer, 100, "invalid-ava-response-signature");
                 return;
             }
         }
 
         auto now = GetTime<std::chrono::seconds>();
 
         std::vector<avalanche::VoteItemUpdate> updates;
         int banscore{0};
         std::string error;
-        if (!g_avalanche->registerVotes(pfrom.GetId(), response, updates,
+        if (!m_avalanche->registerVotes(pfrom.GetId(), response, updates,
                                         banscore, error)) {
             if (banscore > 0) {
                 // If the banscore was set, just increase the node ban score
                 Misbehaving(*peer, banscore, error);
                 return;
             }
 
             // Otherwise the node may have got a network issue. Increase the
             // fault counter instead and only ban if we reached a threshold.
             // This allows for fault tolerance should there be a temporary
             // outage while still preventing DoS'ing behaviors, as the counter
             // is reset if no fault occurred over some time period.
             pfrom.m_avalanche_message_fault_counter++;
             pfrom.m_avalanche_last_message_fault = now;
 
             // Allow up to 12 messages before increasing the ban score. Since
             // the queries are cleared after 10s, this is at least 2 minutes
             // of network outage tolerance over the 1h window.
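             // (12 tolerated faults x the 10 second query lifetime comes to
             // roughly 120 seconds, i.e. 2 minutes, of outage before
             // Misbehaving kicks in.)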
             if (pfrom.m_avalanche_message_fault_counter > 12) {
                 Misbehaving(*peer, 2, error);
                 return;
             }
         }
 
         // If no fault occurred within the last hour, reset the fault counter
         if (now > (pfrom.m_avalanche_last_message_fault.load() + 1h)) {
             pfrom.m_avalanche_message_fault_counter = 0;
         }
 
         pfrom.invsVoted(response.GetVotes().size());
 
         auto logVoteUpdate = [](const auto &voteUpdate,
                                 const std::string &voteItemTypeStr,
                                 const auto &voteItemId) {
             std::string voteOutcome;
             switch (voteUpdate.getStatus()) {
                 case avalanche::VoteStatus::Invalid:
                     voteOutcome = "invalidated";
                     break;
                 case avalanche::VoteStatus::Rejected:
                     voteOutcome = "rejected";
                     break;
                 case avalanche::VoteStatus::Accepted:
                     voteOutcome = "accepted";
                     break;
                 case avalanche::VoteStatus::Finalized:
                     voteOutcome = "finalized";
                     break;
                 case avalanche::VoteStatus::Stale:
                     voteOutcome = "stalled";
                     break;
 
                     // No default case, so the compiler can warn about missing
                     // cases
             }
 
             LogPrint(BCLog::AVALANCHE, "Avalanche %s %s %s\n", voteOutcome,
                      voteItemTypeStr, voteItemId.ToString());
         };
 
         bool shouldActivateBestChain = false;
 
         const bool fPreConsensus = gArgs.GetBoolArg(
             "-avalanchepreconsensus", DEFAULT_AVALANCHE_PRECONSENSUS);
 
         for (const auto &u : updates) {
             const avalanche::AnyVoteItem &item = u.getVoteItem();
 
             // Don't use a visitor here as we want to ignore unsupported item
             // types. This comes in handy when adding new types.
             if (auto pitem = std::get_if<const avalanche::ProofRef>(&item)) {
                 avalanche::ProofRef proof = *pitem;
                 const avalanche::ProofId &proofid = proof->getId();
 
                 logVoteUpdate(u, "proof", proofid);
 
                 auto rejectionMode =
                     avalanche::PeerManager::RejectionMode::DEFAULT;
                 auto nextCooldownTimePoint = GetTime<std::chrono::seconds>();
                 switch (u.getStatus()) {
                     case avalanche::VoteStatus::Invalid:
-                        g_avalanche->withPeerManager(
+                        m_avalanche->withPeerManager(
                             [&](avalanche::PeerManager &pm) {
                                 pm.setInvalid(proofid);
                             });
                         // Fallthrough
                     case avalanche::VoteStatus::Stale:
                         // Invalidate mode removes the proof from all proof
                         // pools
                         rejectionMode =
                             avalanche::PeerManager::RejectionMode::INVALIDATE;
                         // Fallthrough
                     case avalanche::VoteStatus::Rejected:
-                        if (!g_avalanche->withPeerManager(
+                        if (!m_avalanche->withPeerManager(
                                 [&](avalanche::PeerManager &pm) {
                                     return pm.rejectProof(proofid,
                                                           rejectionMode);
                                 })) {
                             LogPrint(BCLog::AVALANCHE,
                                      "ERROR: Failed to reject proof: %s\n",
                                      proofid.GetHex());
                         }
                         break;
                     case avalanche::VoteStatus::Finalized:
                         nextCooldownTimePoint +=
                             std::chrono::seconds(gArgs.GetIntArg(
                                 "-avalanchepeerreplacementcooldown",
                                 AVALANCHE_DEFAULT_PEER_REPLACEMENT_COOLDOWN));
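                         // Fallthrough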
                     case avalanche::VoteStatus::Accepted:
-                        if (!g_avalanche->withPeerManager(
+                        if (!m_avalanche->withPeerManager(
                                 [&](avalanche::PeerManager &pm) {
                                     pm.registerProof(
                                         proof,
                                         avalanche::PeerManager::
                                             RegistrationMode::FORCE_ACCEPT);
                                     return pm.forPeer(
                                         proofid,
                                         [&](const avalanche::Peer &peer) {
                                             pm.updateNextPossibleConflictTime(
                                                 peer.peerid,
                                                 nextCooldownTimePoint);
                                             if (u.getStatus() ==
                                                 avalanche::VoteStatus::
                                                     Finalized) {
                                                 pm.setFinalized(peer.peerid);
                                             }
                                             // Only fail if the peer was not
                                             // created
                                             return true;
                                         });
                                 })) {
                             LogPrint(BCLog::AVALANCHE,
                                      "ERROR: Failed to accept proof: %s\n",
                                      proofid.GetHex());
                         }
                         break;
                 }
             }
 
             if (auto pitem = std::get_if<const CBlockIndex *>(&item)) {
                 CBlockIndex *pindex = const_cast<CBlockIndex *>(*pitem);
 
                 shouldActivateBestChain = true;
 
                 logVoteUpdate(u, "block", pindex->GetBlockHash());
 
                 switch (u.getStatus()) {
                     case avalanche::VoteStatus::Invalid:
                     case avalanche::VoteStatus::Rejected: {
                         BlockValidationState state;
                         m_chainman.ActiveChainstate().ParkBlock(state, pindex);
                         if (!state.IsValid()) {
                             LogPrintf("ERROR: Database error: %s\n",
                                       state.GetRejectReason());
                             return;
                         }
                     } break;
                     case avalanche::VoteStatus::Accepted: {
                         LOCK(cs_main);
                         m_chainman.ActiveChainstate().UnparkBlock(pindex);
                     } break;
                     case avalanche::VoteStatus::Finalized: {
                         {
                             LOCK(cs_main);
                             m_chainman.ActiveChainstate().UnparkBlock(pindex);
                         }
 
                         if (fPreConsensus) {
                             // First check if the block is cached before reading
                             // from disk.
                             auto pblock = WITH_LOCK(m_most_recent_block_mutex,
                                                     return m_most_recent_block);
 
                             if (!pblock ||
                                 pblock->GetHash() != pindex->GetBlockHash()) {
                                 std::shared_ptr<CBlock> pblockRead =
                                     std::make_shared<CBlock>();
                                 if (!m_chainman.m_blockman.ReadBlockFromDisk(
                                         *pblockRead, *pindex)) {
                                     assert(!"cannot load block from disk");
                                 }
                                 pblock = pblockRead;
                             }
                             assert(pblock);
 
                             LOCK(m_mempool.cs);
                             m_mempool.removeForFinalizedBlock(pblock->vtx);
                         }
 
                         m_chainman.ActiveChainstate().AvalancheFinalizeBlock(
                             pindex);
                     } break;
                     case avalanche::VoteStatus::Stale:
                         // Fall back on Nakamoto consensus in the absence of
                         // Avalanche votes for other competing or descendant
                         // blocks.
                         break;
                 }
             }
 
             if (!fPreConsensus) {
                 continue;
             }
 
             if (auto pitem = std::get_if<const CTransactionRef>(&item)) {
                 const CTransactionRef tx = *pitem;
                 assert(tx != nullptr);
 
                 const TxId &txid = tx->GetId();
                 logVoteUpdate(u, "tx", txid);
 
                 switch (u.getStatus()) {
                     case avalanche::VoteStatus::Rejected:
                         break;
                     case avalanche::VoteStatus::Invalid: {
                         // Remove from the mempool and the finalized tree, as
                         // well as all the children txs.
                         // FIXME Remember the tx has been invalidated so we
                         // don't poll for it again and again.
                         LOCK(m_mempool.cs);
                         auto it = m_mempool.GetIter(txid);
                         if (it.has_value()) {
                             m_mempool.removeRecursive(
                                 *tx, MemPoolRemovalReason::AVALANCHE);
                         }
 
                         break;
                     }
                     case avalanche::VoteStatus::Accepted:
                         break;
                     case avalanche::VoteStatus::Finalized: {
                         LOCK(m_mempool.cs);
                         auto it = m_mempool.GetIter(txid);
                         if (!it.has_value()) {
                             LogPrint(BCLog::AVALANCHE,
                                      "Error: finalized tx (%s) is not in the "
                                      "mempool\n",
                                      txid.ToString());
                             break;
                         }
 
                         m_mempool.setAvalancheFinalized(**it);
 
                         break;
                     }
                     case avalanche::VoteStatus::Stale:
                         break;
                 }
             }
         }
 
         if (shouldActivateBestChain) {
             BlockValidationState state;
             if (!m_chainman.ActiveChainstate().ActivateBestChain(state)) {
                 LogPrintf("failed to activate chain (%s)\n", state.ToString());
             }
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::AVAPROOF) {
-        if (!g_avalanche) {
+        if (!m_avalanche) {
             return;
         }
         auto proof = RCUPtr<avalanche::Proof>::make();
         vRecv >> *proof;
 
         ReceivedAvalancheProof(pfrom, *peer, proof);
 
         return;
     }
 
     if (msg_type == NetMsgType::GETAVAPROOFS) {
-        if (!g_avalanche) {
+        if (!m_avalanche) {
             return;
         }
         if (peer->m_proof_relay == nullptr) {
             return;
         }
 
         peer->m_proof_relay->lastSharedProofsUpdate =
             GetTime<std::chrono::seconds>();
 
         peer->m_proof_relay->sharedProofs =
-            g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
+            m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
                 return pm.getShareableProofsSnapshot();
             });
 
         avalanche::CompactProofs compactProofs(
             peer->m_proof_relay->sharedProofs);
         m_connman.PushMessage(
             &pfrom, msgMaker.Make(NetMsgType::AVAPROOFS, compactProofs));
 
         return;
     }
 
     if (msg_type == NetMsgType::AVAPROOFS) {
-        if (!g_avalanche) {
+        if (!m_avalanche) {
             return;
         }
         if (peer->m_proof_relay == nullptr) {
             return;
         }
 
         // Only process the compact proofs if we requested them
         if (!peer->m_proof_relay->compactproofs_requested) {
             LogPrint(BCLog::AVALANCHE, "Ignoring unsolicited avaproofs\n");
             return;
         }
         peer->m_proof_relay->compactproofs_requested = false;
 
         avalanche::CompactProofs compactProofs;
         try {
             vRecv >> compactProofs;
         } catch (std::ios_base::failure &e) {
             // The compact proofs have non-contiguous or overflowing indexes
             Misbehaving(*peer, 100, "avaproofs-bad-indexes");
             return;
         }
 
         // If there are prefilled proofs, process them first
         std::set<uint32_t> prefilledIndexes;
         for (const auto &prefilledProof : compactProofs.getPrefilledProofs()) {
             if (!ReceivedAvalancheProof(pfrom, *peer, prefilledProof.proof)) {
                 // If we got an invalid proof, the peer is getting banned and we
                 // can bail out.
                 return;
             }
         }
 
         // If there is no shortid, avoid parsing/responding/accounting for the
         // message.
         if (compactProofs.getShortIDs().size() == 0) {
             LogPrint(BCLog::AVALANCHE,
                      "Got an avaproofs message with no shortid (peer %d)\n",
                      pfrom.GetId());
             return;
         }
 
         // To determine the chance that the number of entries in a bucket
         // exceeds N, we use the fact that the number of elements in a single
         // bucket is binomially distributed (with n = the number of short ids
         // S, and p = 1 / the number of buckets), that in the worst case the
         // number of buckets is equal to S (due to std::unordered_map having a
         // default load factor of 1.0), and that the chance for any bucket to
         // exceed N elements is at most buckets * (the chance that any given
         // bucket is above N elements). Thus:
         //   P(max_elements_per_bucket > N) <=
         //     S * (1 - cdf(binomial(n=S,p=1/S), N))
         // If we assume up to 21000000 proofs, allowing 15 elements per
         // bucket should only fail once per ~2.5 million avaproofs transfers
         // (per peer and connection).
         // TODO re-evaluate the bucket count to a more realistic value.
         // TODO: In the case of a shortid-collision, we should request all the
         // proofs which collided. For now, we only request one, which is not
         // that bad considering this event is expected to be very rare.
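         // As a worked instance of the bound above, with S = 21000000 and
         // N = 15:
         //   P(max_elements_per_bucket > 15) <=
         //     21000000 * (1 - cdf(binomial(n=21000000, p=1/21000000), 15))
         // which is consistent with the ~1 in 2.5 million failure rate
         // quoted above.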
         auto shortIdProcessor =
             avalanche::ProofShortIdProcessor(compactProofs.getPrefilledProofs(),
                                              compactProofs.getShortIDs(), 15);
 
         if (shortIdProcessor.hasOutOfBoundIndex()) {
             // This should be caught by deserialization, but check it here as
             // well as a good measure.
             Misbehaving(*peer, 100, "avaproofs-bad-indexes");
             return;
         }
         if (!shortIdProcessor.isEvenlyDistributed()) {
             // This is suspicious, don't ban but bail out
             return;
         }
 
         size_t proofCount = 0;
         std::vector<std::pair<avalanche::ProofId, bool>> remoteProofsStatus;
-        g_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
+        m_avalanche->withPeerManager([&](const avalanche::PeerManager &pm) {
             pm.forEachPeer([&](const avalanche::Peer &peer) {
                 assert(peer.proof);
                 uint64_t shortid = compactProofs.getShortID(peer.getProofId());
 
                 int added =
                     shortIdProcessor.matchKnownItem(shortid, peer.proof);
 
                 // No collision
                 if (added >= 0) {
                     // Because we know the proof, we can determine if our peer
                     // has it (added = 1) or not (added = 0) and update the
                     // remote proof status accordingly.
                     remoteProofsStatus.emplace_back(peer.getProofId(),
                                                     added > 0);
                 }
 
                 proofCount += added;
 
                 // In order to properly determine which proof is missing, we
                 // need to keep scanning for all our proofs.
                 return true;
             });
         });
 
         avalanche::ProofsRequest req;
         for (size_t i = 0; i < compactProofs.size(); i++) {
             if (shortIdProcessor.getItem(i) == nullptr) {
                 req.indices.push_back(i);
             }
         }
 
         m_connman.PushMessage(&pfrom,
                               msgMaker.Make(NetMsgType::AVAPROOFSREQ, req));
 
         const NodeId nodeid = pfrom.GetId();
 
         // We want to keep a count of how many nodes we successfully requested
         // avaproofs from, as this is used to determine when we are confident
         // our quorum is close enough to the other participants.
-        g_avalanche->avaproofsSent(nodeid);
+        m_avalanche->avaproofsSent(nodeid);
 
         // Only save remote proofs from stakers
         if (WITH_LOCK(pfrom.cs_avalanche_pubkey,
                       return pfrom.m_avalanche_pubkey.has_value())) {
-            g_avalanche->withPeerManager(
+            m_avalanche->withPeerManager(
                 [&remoteProofsStatus, nodeid](avalanche::PeerManager &pm) {
                     for (const auto &[proofid, present] : remoteProofsStatus) {
                         pm.saveRemoteProof(proofid, nodeid, present);
                     }
                 });
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::AVAPROOFSREQ) {
         if (peer->m_proof_relay == nullptr) {
             return;
         }
 
         avalanche::ProofsRequest proofreq;
         vRecv >> proofreq;
 
         auto requestedIndiceIt = proofreq.indices.begin();
         uint32_t treeIndice = 0;
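         // Walk the shared proofs tree in leaf order, sending every proof
         // whose position matches the next requested index; the indices are
         // sorted ascending, as built in the AVAPROOFS handler above.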
         peer->m_proof_relay->sharedProofs.forEachLeaf([&](const auto &proof) {
             if (requestedIndiceIt == proofreq.indices.end()) {
                 // No more indices to process
                 return false;
             }
 
             if (treeIndice++ == *requestedIndiceIt) {
                 m_connman.PushMessage(
                     &pfrom, msgMaker.Make(NetMsgType::AVAPROOF, *proof));
                 requestedIndiceIt++;
             }
 
             return true;
         });
 
         peer->m_proof_relay->sharedProofs = {};
         return;
     }
 
     if (msg_type == NetMsgType::GETADDR) {
         // This asymmetric behavior for inbound and outbound connections was
         // introduced to prevent a fingerprinting attack: an attacker can send
         // specific fake addresses to users' AddrMan and later request them by
         // sending getaddr messages. Making nodes which are behind NAT and can
         // only make outgoing connections ignore the getaddr message mitigates
         // the attack.
         if (!pfrom.IsInboundConn()) {
             LogPrint(BCLog::NET,
                      "Ignoring \"getaddr\" from %s connection. peer=%d\n",
                      pfrom.ConnectionTypeAsString(), pfrom.GetId());
             return;
         }
 
         // Since this must be an inbound connection, SetupAddressRelay will
         // never fail.
         Assume(SetupAddressRelay(pfrom, *peer));
 
         // Only send one GetAddr response per connection to reduce resource
         // waste and discourage addr stamping of INV announcements.
         if (peer->m_getaddr_recvd) {
             LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n",
                      pfrom.GetId());
             return;
         }
         peer->m_getaddr_recvd = true;
 
         peer->m_addrs_to_send.clear();
         std::vector<CAddress> vAddr;
         const size_t maxAddrToSend = GetMaxAddrToSend();
         if (pfrom.HasPermission(NetPermissionFlags::Addr)) {
             vAddr = m_connman.GetAddresses(maxAddrToSend, MAX_PCT_ADDR_TO_SEND,
                                            /* network */ std::nullopt);
         } else {
             vAddr = m_connman.GetAddresses(pfrom, maxAddrToSend,
                                            MAX_PCT_ADDR_TO_SEND);
         }
         FastRandomContext insecure_rand;
         for (const CAddress &addr : vAddr) {
             PushAddress(*peer, addr, insecure_rand);
         }
         return;
     }
 
     if (msg_type == NetMsgType::GETAVAADDR) {
         auto now = GetTime<std::chrono::seconds>();
         if (now < pfrom.m_nextGetAvaAddr) {
             // Prevent a peer from exhausting our resources by spamming
             // getavaaddr messages.
             LogPrint(BCLog::AVALANCHE,
                      "Ignoring repeated getavaaddr from peer %d\n",
                      pfrom.GetId());
             return;
         }
 
         // Only accept a getavaaddr every GETAVAADDR_INTERVAL at most
         pfrom.m_nextGetAvaAddr = now + GETAVAADDR_INTERVAL;
 
         if (!SetupAddressRelay(pfrom, *peer)) {
             LogPrint(BCLog::AVALANCHE,
                      "Ignoring getavaaddr message from %s peer=%d\n",
                      pfrom.ConnectionTypeAsString(), pfrom.GetId());
             return;
         }
 
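         // Order nodes by descending availability score; ties are broken by
         // pointer value so the comparator is a strict weak ordering suitable
         // for std::set.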
         auto availabilityScoreComparator = [](const CNode *lhs,
                                               const CNode *rhs) {
             double scoreLhs = lhs->getAvailabilityScore();
             double scoreRhs = rhs->getAvailabilityScore();
 
             if (scoreLhs != scoreRhs) {
                 return scoreLhs > scoreRhs;
             }
 
             return lhs < rhs;
         };
 
         // Get up to MAX_ADDR_TO_SEND addresses of the nodes which are the
         // most active in the avalanche network. Account for 0 availability as
         // well so we can send addresses even if we did not start polling yet.
         std::set<const CNode *, decltype(availabilityScoreComparator)> avaNodes(
             availabilityScoreComparator);
         m_connman.ForEachNode([&](const CNode *pnode) {
             if (!pnode->m_avalanche_enabled ||
                 pnode->getAvailabilityScore() < 0.) {
                 return;
             }
 
             avaNodes.insert(pnode);
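             // Keep the set trimmed to the address relay limit by dropping
             // the lowest-scoring node.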
             if (avaNodes.size() > GetMaxAddrToSend()) {
                 avaNodes.erase(std::prev(avaNodes.end()));
             }
         });
 
         peer->m_addrs_to_send.clear();
         FastRandomContext insecure_rand;
         for (const CNode *pnode : avaNodes) {
             PushAddress(*peer, pnode->addr, insecure_rand);
         }
 
         return;
     }
 
     if (msg_type == NetMsgType::MEMPOOL) {
         if (!(peer->m_our_services & NODE_BLOOM) &&
             !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
             if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
                 LogPrint(BCLog::NET,
                          "mempool request with bloom filters disabled, "
                          "disconnect peer=%d\n",
                          pfrom.GetId());
                 pfrom.fDisconnect = true;
             }
             return;
         }
 
         if (m_connman.OutboundTargetReached(false) &&
             !pfrom.HasPermission(NetPermissionFlags::Mempool)) {
             if (!pfrom.HasPermission(NetPermissionFlags::NoBan)) {
                 LogPrint(BCLog::NET,
                          "mempool request with bandwidth limit reached, "
                          "disconnect peer=%d\n",
                          pfrom.GetId());
                 pfrom.fDisconnect = true;
             }
             return;
         }
 
         if (auto tx_relay = peer->GetTxRelay()) {
             LOCK(tx_relay->m_tx_inventory_mutex);
             tx_relay->m_send_mempool = true;
         }
         return;
     }
 
     if (msg_type == NetMsgType::PING) {
         if (pfrom.GetCommonVersion() > BIP0031_VERSION) {
             uint64_t nonce = 0;
             vRecv >> nonce;
             // Echo the message back with the nonce. This allows for two useful
             // features:
             //
             // 1) A remote node can quickly check if the connection is
             // operational.
             // 2) Remote nodes can measure the latency of the network thread. If
             // this node is overloaded it won't respond to pings quickly and the
             // remote node can avoid sending us more work, like chain download
             // requests.
             //
             // The nonce stops the remote getting confused between different
             // pings: without it, if the remote node sends a ping once per
             // second and this node takes 5 seconds to respond to each, the 5th
             // ping the remote sends would appear to return very quickly.
             m_connman.PushMessage(&pfrom,
                                   msgMaker.Make(NetMsgType::PONG, nonce));
         }
         return;
     }
 
     if (msg_type == NetMsgType::PONG) {
         const auto ping_end = time_received;
         uint64_t nonce = 0;
         size_t nAvail = vRecv.in_avail();
         bool bPingFinished = false;
         std::string sProblem;
 
         if (nAvail >= sizeof(nonce)) {
             vRecv >> nonce;
 
             // Only process pong message if there is an outstanding ping (old
             // ping without nonce should never pong)
             if (peer->m_ping_nonce_sent != 0) {
                 if (nonce == peer->m_ping_nonce_sent) {
                     // Matching pong received, this ping is no longer
                     // outstanding
                     bPingFinished = true;
                     const auto ping_time = ping_end - peer->m_ping_start.load();
                     if (ping_time.count() >= 0) {
                         // Let connman know about this successful ping-pong
                         pfrom.PongReceived(ping_time);
                     } else {
                         // This should never happen
                         sProblem = "Timing mishap";
                     }
                 } else {
                     // Nonce mismatches are normal when pings are overlapping
                     sProblem = "Nonce mismatch";
                     if (nonce == 0) {
                         // This is most likely a bug in another implementation
                         // somewhere; cancel this ping
                         bPingFinished = true;
                         sProblem = "Nonce zero";
                     }
                 }
             } else {
                 sProblem = "Unsolicited pong without ping";
             }
         } else {
             // This is most likely a bug in another implementation somewhere;
             // cancel this ping
             bPingFinished = true;
             sProblem = "Short payload";
         }
 
         if (!(sProblem.empty())) {
             LogPrint(BCLog::NET,
                      "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
                      pfrom.GetId(), sProblem, peer->m_ping_nonce_sent, nonce,
                      nAvail);
         }
         if (bPingFinished) {
             peer->m_ping_nonce_sent = 0;
         }
         return;
     }
 
     if (msg_type == NetMsgType::FILTERLOAD) {
         if (!(peer->m_our_services & NODE_BLOOM)) {
             LogPrint(BCLog::NET,
                      "filterload received despite not offering bloom services "
                      "from peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
         CBloomFilter filter;
         vRecv >> filter;
 
         if (!filter.IsWithinSizeConstraints()) {
             // There is no excuse for sending a too-large filter
             Misbehaving(*peer, 100, "too-large bloom filter");
         } else if (auto tx_relay = peer->GetTxRelay()) {
             {
                 LOCK(tx_relay->m_bloom_filter_mutex);
                 tx_relay->m_bloom_filter.reset(new CBloomFilter(filter));
                 tx_relay->m_relay_txs = true;
             }
             pfrom.m_bloom_filter_loaded = true;
         }
         return;
     }
 
     if (msg_type == NetMsgType::FILTERADD) {
         if (!(peer->m_our_services & NODE_BLOOM)) {
             LogPrint(BCLog::NET,
                      "filteradd received despite not offering bloom services "
                      "from peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
         std::vector<uint8_t> vData;
         vRecv >> vData;
 
         // Nodes must NEVER send a data item > 520 bytes (the max size for a
         // script data object, and thus, the maximum size any matched object can
         // have) in a filteradd message.
         bool bad = false;
         if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
             bad = true;
         } else if (auto tx_relay = peer->GetTxRelay()) {
             LOCK(tx_relay->m_bloom_filter_mutex);
             if (tx_relay->m_bloom_filter) {
                 tx_relay->m_bloom_filter->insert(vData);
             } else {
                 bad = true;
             }
         }
         if (bad) {
             // The structure of this code doesn't really allow for a good error
             // code. We'll go generic.
             Misbehaving(*peer, 100, "bad filteradd message");
         }
         return;
     }
 
     if (msg_type == NetMsgType::FILTERCLEAR) {
         if (!(peer->m_our_services & NODE_BLOOM)) {
             LogPrint(BCLog::NET,
                      "filterclear received despite not offering bloom services "
                      "from peer=%d; disconnecting\n",
                      pfrom.GetId());
             pfrom.fDisconnect = true;
             return;
         }
         auto tx_relay = peer->GetTxRelay();
         if (!tx_relay) {
             return;
         }
 
         {
             LOCK(tx_relay->m_bloom_filter_mutex);
             tx_relay->m_bloom_filter = nullptr;
             tx_relay->m_relay_txs = true;
         }
         pfrom.m_bloom_filter_loaded = false;
         pfrom.m_relays_txs = true;
         return;
     }
 
     if (msg_type == NetMsgType::FEEFILTER) {
         Amount newFeeFilter = Amount::zero();
         vRecv >> newFeeFilter;
         if (MoneyRange(newFeeFilter)) {
             if (auto tx_relay = peer->GetTxRelay()) {
                 tx_relay->m_fee_filter_received = newFeeFilter;
             }
             LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n",
                      CFeeRate(newFeeFilter).ToString(), pfrom.GetId());
         }
         return;
     }
 
     if (msg_type == NetMsgType::GETCFILTERS) {
         ProcessGetCFilters(pfrom, *peer, vRecv);
         return;
     }
 
     if (msg_type == NetMsgType::GETCFHEADERS) {
         ProcessGetCFHeaders(pfrom, *peer, vRecv);
         return;
     }
 
     if (msg_type == NetMsgType::GETCFCHECKPT) {
         ProcessGetCFCheckPt(pfrom, *peer, vRecv);
         return;
     }
 
     if (msg_type == NetMsgType::NOTFOUND) {
         std::vector<CInv> vInv;
         vRecv >> vInv;
         // A peer might send up to 1 notfound per getdata request, but no more
         if (vInv.size() <= PROOF_REQUEST_PARAMS.max_peer_announcements +
                                TX_REQUEST_PARAMS.max_peer_announcements +
                                MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             for (CInv &inv : vInv) {
                 if (inv.IsMsgTx()) {
                     // If we receive a NOTFOUND message for a tx we requested,
                     // mark the announcement for it as completed in
                     // InvRequestTracker.
                     LOCK(::cs_main);
                     m_txrequest.ReceivedResponse(pfrom.GetId(), TxId(inv.hash));
                     continue;
                 }
                 if (inv.IsMsgProof()) {
                     LOCK(cs_proofrequest);
                     m_proofrequest.ReceivedResponse(
                         pfrom.GetId(), avalanche::ProofId(inv.hash));
                 }
             }
         }
         return;
     }
 
     // Ignore unknown commands for extensibility
     LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n",
              SanitizeString(msg_type), pfrom.GetId());
     return;
 }
 
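 // Punishment escalates by peer class: noban and manual peers are never
 // punished, local peers are disconnected but not discouraged (discouraging
 // would affect every peer sharing that local address), and all remaining
 // peers are disconnected and their address discouraged.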
 bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode &pnode, Peer &peer) {
     {
         LOCK(peer.m_misbehavior_mutex);
 
         // There's nothing to do if the m_should_discourage flag isn't set
         if (!peer.m_should_discourage) {
             return false;
         }
 
         peer.m_should_discourage = false;
     } // peer.m_misbehavior_mutex
 
     if (pnode.HasPermission(NetPermissionFlags::NoBan)) {
         // We never disconnect or discourage peers for bad behavior if they have
         // NetPermissionFlags::NoBan permission
         LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id);
         return false;
     }
 
     if (pnode.IsManualConn()) {
         // We never disconnect or discourage manual peers for bad behavior
         LogPrintf("Warning: not punishing manually connected peer %d!\n",
                   peer.m_id);
         return false;
     }
 
     if (pnode.addr.IsLocal()) {
         // We disconnect local peers for bad behavior but don't discourage
         // (since that would discourage all peers on the same local address)
         LogPrint(BCLog::NET,
                  "Warning: disconnecting but not discouraging %s peer %d!\n",
                  pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id);
         pnode.fDisconnect = true;
         return true;
     }
 
     // Normal case: Disconnect the peer and discourage all nodes sharing the
     // address
     LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n",
              peer.m_id);
     if (m_banman) {
         m_banman->Discourage(pnode.addr);
     }
     m_connman.DisconnectNode(pnode.addr);
     return true;
 }
 
 bool PeerManagerImpl::ProcessMessages(const Config &config, CNode *pfrom,
                                       std::atomic<bool> &interruptMsgProc) {
     AssertLockHeld(g_msgproc_mutex);
 
     //
     // Message format
     //  (4) message start
     //  (12) command
     //  (4) size
     //  (4) checksum
     //  (x) data
     //
     bool fMoreWork = false;
 
     PeerRef peer = GetPeerRef(pfrom->GetId());
     if (peer == nullptr) {
         return false;
     }
 
     {
         LOCK(peer->m_getdata_requests_mutex);
         if (!peer->m_getdata_requests.empty()) {
             ProcessGetData(config, *pfrom, *peer, interruptMsgProc);
         }
     }
 
     const bool processed_orphan = ProcessOrphanTx(config, *peer);
 
     if (pfrom->fDisconnect) {
         return false;
     }
 
     if (processed_orphan) {
         return true;
     }
 
     // This maintains the order of responses and prevents m_getdata_requests
     // from growing unbounded.
     {
         LOCK(peer->m_getdata_requests_mutex);
         if (!peer->m_getdata_requests.empty()) {
             return true;
         }
     }
 
     // Don't bother if send buffer is too full to respond anyway
     if (pfrom->fPauseSend) {
         return false;
     }
 
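     // Take at most one message off the receive queue per call; fMoreWork
     // reports whether this peer still has queued messages afterwards.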
     std::list<CNetMessage> msgs;
     {
         LOCK(pfrom->cs_vProcessMsg);
         if (pfrom->vProcessMsg.empty()) {
             return false;
         }
         // Just take one message
         msgs.splice(msgs.begin(), pfrom->vProcessMsg,
                     pfrom->vProcessMsg.begin());
         pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size;
         pfrom->fPauseRecv =
             pfrom->nProcessQueueSize > m_connman.GetReceiveFloodSize();
         fMoreWork = !pfrom->vProcessMsg.empty();
     }
     CNetMessage &msg(msgs.front());
 
     TRACE6(net, inbound_message, pfrom->GetId(), pfrom->m_addr_name.c_str(),
            pfrom->ConnectionTypeAsString().c_str(), msg.m_type.c_str(),
            msg.m_recv.size(), msg.m_recv.data());
 
     if (gArgs.GetBoolArg("-capturemessages", false)) {
         CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv),
                        /*is_incoming=*/true);
     }
 
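     // Deserialize the payload using the protocol version negotiated during
     // the version handshake.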
     msg.SetVersion(pfrom->GetCommonVersion());
 
     // Check network magic
     if (!msg.m_valid_netmagic) {
         LogPrint(BCLog::NET,
                  "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n",
                  SanitizeString(msg.m_type), pfrom->GetId());
 
         // Make sure we discourage the address this came from for some time.
         if (m_banman) {
             m_banman->Discourage(pfrom->addr);
         }
         m_connman.DisconnectNode(pfrom->addr);
 
         pfrom->fDisconnect = true;
         return false;
     }
 
     // Check header
     if (!msg.m_valid_header) {
         LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n",
                  SanitizeString(msg.m_type), pfrom->GetId());
         return fMoreWork;
     }
 
     // Checksum
     CDataStream &vRecv = msg.m_recv;
     if (!msg.m_valid_checksum) {
         LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n",
                  __func__, SanitizeString(msg.m_type), msg.m_message_size,
                  pfrom->GetId());
         if (m_banman) {
             m_banman->Discourage(pfrom->addr);
         }
         m_connman.DisconnectNode(pfrom->addr);
         return fMoreWork;
     }
 
     try {
         ProcessMessage(config, *pfrom, msg.m_type, vRecv, msg.m_time,
                        interruptMsgProc);
         if (interruptMsgProc) {
             return false;
         }
 
         {
             LOCK(peer->m_getdata_requests_mutex);
             if (!peer->m_getdata_requests.empty()) {
                 fMoreWork = true;
             }
         }
         // Does this peer have an orphan ready to reconsider?
         // (Note: we may have provided a parent for an orphan provided by
         // another peer that was already processed; in that case, the extra work
         // may not be noticed, possibly resulting in an unnecessary 100ms delay)
         if (m_orphanage.HaveTxToReconsider(peer->m_id)) {
             fMoreWork = true;
         }
     } catch (const std::exception &e) {
         LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n",
                  __func__, SanitizeString(msg.m_type), msg.m_message_size,
                  e.what(), typeid(e).name());
     } catch (...) {
         LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n",
                  __func__, SanitizeString(msg.m_type), msg.m_message_size);
     }
 
     return fMoreWork;
 }
 
 void PeerManagerImpl::ConsiderEviction(CNode &pto, Peer &peer,
                                        std::chrono::seconds time_in_seconds) {
     AssertLockHeld(cs_main);
 
     CNodeState &state = *State(pto.GetId());
     const CNetMsgMaker msgMaker(pto.GetCommonVersion());
 
     if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() &&
         state.fSyncStarted) {
         // This is an outbound peer subject to disconnection if they don't
         // announce a block with as much work as the current tip within
         // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their
         // chain has more work than ours, we should sync to it, unless it's
         // invalid, in which case we should find that out and disconnect from
         // them elsewhere).
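         // m_chain_sync acts as a small state machine: a timeout of 0s means
         // the peer is not currently under suspicion; a nonzero timeout with
         // m_sent_getheaders == false means we are waiting for the peer to
         // catch up; once the probe getheaders has been sent, the peer either
         // catches up or is disconnected when the bumped timeout expires.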
         if (state.pindexBestKnownBlock != nullptr &&
             state.pindexBestKnownBlock->nChainWork >=
                 m_chainman.ActiveChain().Tip()->nChainWork) {
             if (state.m_chain_sync.m_timeout != 0s) {
                 state.m_chain_sync.m_timeout = 0s;
                 state.m_chain_sync.m_work_header = nullptr;
                 state.m_chain_sync.m_sent_getheaders = false;
             }
         } else if (state.m_chain_sync.m_timeout == 0s ||
                    (state.m_chain_sync.m_work_header != nullptr &&
                     state.pindexBestKnownBlock != nullptr &&
                     state.pindexBestKnownBlock->nChainWork >=
                         state.m_chain_sync.m_work_header->nChainWork)) {
             // Our best block known by this peer is behind our tip, and we're
             // either noticing that for the first time, OR this peer was able to
             // catch up to some earlier point where we checked against our tip.
             // Either way, set a new timeout based on current tip.
             state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
             state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
             state.m_chain_sync.m_sent_getheaders = false;
         } else if (state.m_chain_sync.m_timeout > 0s &&
                    time_in_seconds > state.m_chain_sync.m_timeout) {
             // No evidence yet that our peer has synced to a chain with work
             // equal to that of our tip, when we first detected it was behind.
             // Send a single getheaders message to give the peer a chance to
             // update us.
             if (state.m_chain_sync.m_sent_getheaders) {
                 // They've run out of time to catch up!
                 LogPrintf(
                     "Disconnecting outbound peer %d for old chain, best known "
                     "block = %s\n",
                     pto.GetId(),
                     state.pindexBestKnownBlock != nullptr
                         ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                         : "<none>");
                 pto.fDisconnect = true;
             } else {
                 assert(state.m_chain_sync.m_work_header);
                 // Here, we assume that the getheaders message goes out,
                 // because it'll either go out or be skipped because of a
                 // getheaders in-flight already, in which case the peer should
                 // still respond to us with a sufficiently high work chain tip.
                 MaybeSendGetHeaders(
                     pto, GetLocator(state.m_chain_sync.m_work_header->pprev),
                     peer);
                 LogPrint(
                     BCLog::NET,
                     "sending getheaders to outbound peer=%d to verify chain "
                     "work (current best known block:%s, benchmark blockhash: "
                     "%s)\n",
                     pto.GetId(),
                     state.pindexBestKnownBlock != nullptr
                         ? state.pindexBestKnownBlock->GetBlockHash().ToString()
                         : "<none>",
                     state.m_chain_sync.m_work_header->GetBlockHash()
                         .ToString());
                 state.m_chain_sync.m_sent_getheaders = true;
                 // Bump the timeout to allow a response, which could clear the
                 // timeout (if the response shows the peer has synced), reset
                 // the timeout (if the peer syncs to the required work but not
                 // to our tip), or result in disconnect (if we advance to the
                 // timeout and pindexBestKnownBlock has not sufficiently
                 // progressed)
                 state.m_chain_sync.m_timeout =
                     time_in_seconds + HEADERS_RESPONSE_TIME;
             }
         }
     }
 }
 
 void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now) {
     // If we have any extra block-relay-only peers, disconnect the youngest
     // unless it's given us a block -- in which case, compare with the
     // second-youngest, and out of those two, disconnect the peer who least
     // recently gave us a block.
     // The youngest block-relay-only peer would be the extra peer we connected
     // to temporarily in order to sync our tip; see net.cpp.
     // Note that we use higher nodeid as a measure for most recent connection.
     if (m_connman.GetExtraBlockRelayCount() > 0) {
         std::pair<NodeId, std::chrono::seconds> youngest_peer{-1, 0},
             next_youngest_peer{-1, 0};
 
         m_connman.ForEachNode([&](CNode *pnode) {
             if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) {
                 return;
             }
             if (pnode->GetId() > youngest_peer.first) {
                 next_youngest_peer = youngest_peer;
                 youngest_peer.first = pnode->GetId();
                 youngest_peer.second = pnode->m_last_block_time;
             }
         });
 
         NodeId to_disconnect = youngest_peer.first;
         if (youngest_peer.second > next_youngest_peer.second) {
             // Our newest block-relay-only peer gave us a block more recently;
             // disconnect our second youngest.
             to_disconnect = next_youngest_peer.first;
         }
 
         m_connman.ForNode(
             to_disconnect,
             [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
                 AssertLockHeld(::cs_main);
                 // Make sure we're not getting a block right now, and that we've
                 // been connected long enough for this eviction to happen at
                 // all. Note that we only request blocks from a peer if we learn
                 // of a valid headers chain with at least as much work as our
                 // tip.
                 CNodeState *node_state = State(pnode->GetId());
                 if (node_state == nullptr ||
                     (now - pnode->m_connected >= MINIMUM_CONNECT_TIME &&
                      node_state->nBlocksInFlight == 0)) {
                     pnode->fDisconnect = true;
                     LogPrint(BCLog::NET,
                              "disconnecting extra block-relay-only peer=%d "
                              "(last block received at time %d)\n",
                              pnode->GetId(),
                              count_seconds(pnode->m_last_block_time));
                     return true;
                 } else {
                     LogPrint(
                         BCLog::NET,
                         "keeping block-relay-only peer=%d chosen for eviction "
                         "(connect time: %d, blocks_in_flight: %d)\n",
                         pnode->GetId(), count_seconds(pnode->m_connected),
                         node_state->nBlocksInFlight);
                 }
                 return false;
             });
     }
 
     // Check whether we have too many OUTBOUND_FULL_RELAY peers
     if (m_connman.GetExtraFullOutboundCount() <= 0) {
         return;
     }
 
     // If we have more OUTBOUND_FULL_RELAY peers than we target, disconnect one.
     // Pick the OUTBOUND_FULL_RELAY peer that least recently announced us a new
     // block, with ties broken by choosing the more recent connection (higher
     // node id)
     NodeId worst_peer = -1;
     int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
 
     m_connman.ForEachNode([&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(
                               ::cs_main) {
         AssertLockHeld(::cs_main);
 
         // Only consider OUTBOUND_FULL_RELAY peers that are not already marked
         // for disconnection
         if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) {
             return;
         }
         CNodeState *state = State(pnode->GetId());
         if (state == nullptr) {
             // shouldn't be possible, but just in case
             return;
         }
         // Don't evict our protected peers
         if (state->m_chain_sync.m_protect) {
             return;
         }
         if (state->m_last_block_announcement < oldest_block_announcement ||
             (state->m_last_block_announcement == oldest_block_announcement &&
              pnode->GetId() > worst_peer)) {
             worst_peer = pnode->GetId();
             oldest_block_announcement = state->m_last_block_announcement;
         }
     });
 
     if (worst_peer == -1) {
         return;
     }
 
     bool disconnected = m_connman.ForNode(
         worst_peer, [&](CNode *pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
             AssertLockHeld(::cs_main);
 
             // Only disconnect a peer that has been connected to us for some
             // reasonable fraction of our check-frequency, to give it time for
             // new information to have arrived. Also don't disconnect any peer
             // we're trying to download a block from.
             CNodeState &state = *State(pnode->GetId());
             if (now - pnode->m_connected > MINIMUM_CONNECT_TIME &&
                 state.nBlocksInFlight == 0) {
                 LogPrint(BCLog::NET,
                          "disconnecting extra outbound peer=%d (last block "
                          "announcement received at time %d)\n",
                          pnode->GetId(), oldest_block_announcement);
                 pnode->fDisconnect = true;
                 return true;
             } else {
                 LogPrint(BCLog::NET,
                          "keeping outbound peer=%d chosen for eviction "
                          "(connect time: %d, blocks_in_flight: %d)\n",
                          pnode->GetId(), count_seconds(pnode->m_connected),
                          state.nBlocksInFlight);
                 return false;
             }
         });
 
     if (disconnected) {
         // If we disconnected an extra peer, that means we successfully
         // connected to at least one peer after the last time we detected a
         // stale tip. Don't try any more extra peers until we next detect a
         // stale tip, to limit the load we put on the network from these extra
         // connections.
         m_connman.SetTryNewOutboundPeer(false);
     }
 }
 
 void PeerManagerImpl::CheckForStaleTipAndEvictPeers() {
     LOCK(cs_main);
 
     auto now{GetTime<std::chrono::seconds>()};
 
     EvictExtraOutboundPeers(now);
 
     if (now > m_stale_tip_check_time) {
         // Check whether our tip is stale, and if so, allow using an extra
         // outbound peer.
         if (!m_chainman.m_blockman.LoadingBlocks() &&
             m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() &&
             TipMayBeStale()) {
             LogPrintf("Potential stale tip detected, will try using extra "
                       "outbound peer (last tip update: %d seconds ago)\n",
                       count_seconds(now - m_last_tip_update.load()));
             m_connman.SetTryNewOutboundPeer(true);
         } else if (m_connman.GetTryNewOutboundPeer()) {
             m_connman.SetTryNewOutboundPeer(false);
         }
         m_stale_tip_check_time = now + STALE_CHECK_INTERVAL;
     }
 
     if (!m_initial_sync_finished && CanDirectFetch()) {
         m_connman.StartExtraBlockRelayPeers();
         m_initial_sync_finished = true;
     }
 }
 
 void PeerManagerImpl::MaybeSendPing(CNode &node_to, Peer &peer,
                                     std::chrono::microseconds now) {
     if (m_connman.ShouldRunInactivityChecks(
             node_to, std::chrono::duration_cast<std::chrono::seconds>(now)) &&
         peer.m_ping_nonce_sent &&
         now > peer.m_ping_start.load() + TIMEOUT_INTERVAL) {
         // The ping timeout is using mocktime. To disable the check during
         // testing, increase -peertimeout.
         LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n",
                  0.000001 * count_microseconds(now - peer.m_ping_start.load()),
                  peer.m_id);
         node_to.fDisconnect = true;
         return;
     }
 
     const CNetMsgMaker msgMaker(node_to.GetCommonVersion());
     bool pingSend = false;
 
     if (peer.m_ping_queued) {
         // RPC ping request by user
         pingSend = true;
     }
 
     if (peer.m_ping_nonce_sent == 0 &&
         now > peer.m_ping_start.load() + PING_INTERVAL) {
         // Ping automatically sent as a latency probe & keepalive.
         pingSend = true;
     }
 
     if (pingSend) {
         uint64_t nonce;
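         // A nonce of 0 is reserved to mean "no ping in flight", so keep
         // drawing until we get a nonzero value.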
         do {
             nonce = GetRand<uint64_t>();
         } while (nonce == 0);
         peer.m_ping_queued = false;
         peer.m_ping_start = now;
         if (node_to.GetCommonVersion() > BIP0031_VERSION) {
             peer.m_ping_nonce_sent = nonce;
             m_connman.PushMessage(&node_to,
                                   msgMaker.Make(NetMsgType::PING, nonce));
         } else {
             // Peer is too old to support the ping command with a nonce; a
             // pong will never arrive.
             peer.m_ping_nonce_sent = 0;
             m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING));
         }
     }
 }
 
 void PeerManagerImpl::MaybeSendAddr(CNode &node, Peer &peer,
                                     std::chrono::microseconds current_time) {
     // Nothing to do for non-address-relay peers
     if (!peer.m_addr_relay_enabled) {
         return;
     }
 
     LOCK(peer.m_addr_send_times_mutex);
     if (fListen && !m_chainman.ActiveChainstate().IsInitialBlockDownload() &&
         peer.m_next_local_addr_send < current_time) {
         // If we've sent before, clear the bloom filter for the peer, so
         // that our self-announcement will actually go out. This might
         // be unnecessary if the bloom filter has already rolled over
         // since our last self-announcement, but the bandwidth cost we
         // incur by doing this (which happens once a day on average) is
         // small.
         if (peer.m_next_local_addr_send != 0us) {
             peer.m_addr_known->reset();
         }
         if (std::optional<CService> local_service = GetLocalAddrForPeer(node)) {
             CAddress local_addr{*local_service, peer.m_our_services,
                                 AdjustedTime()};
             FastRandomContext insecure_rand;
             PushAddress(peer, local_addr, insecure_rand);
         }
         peer.m_next_local_addr_send = GetExponentialRand(
             current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
     }
 
     // We sent an `addr` message to this peer recently. Nothing more to do.
     if (current_time <= peer.m_next_addr_send) {
         return;
     }
 
     peer.m_next_addr_send =
         GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
 
     const size_t max_addr_to_send = GetMaxAddrToSend();
     if (!Assume(peer.m_addrs_to_send.size() <= max_addr_to_send)) {
         // Should be impossible since we always check size before adding to
         // m_addrs_to_send. Recover by trimming the vector.
         peer.m_addrs_to_send.resize(max_addr_to_send);
     }
 
     // Remove addr records that the peer already knows about, and add new
     // addrs to the m_addr_known filter on the same pass.
     auto addr_already_known =
         [&peer](const CAddress &addr)
             EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) {
                 bool ret = peer.m_addr_known->contains(addr.GetKey());
                 if (!ret) {
                     peer.m_addr_known->insert(addr.GetKey());
                 }
                 return ret;
             };
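     // The predicate above has a deliberate side effect: addresses not yet
     // known are inserted into m_addr_known as they are tested, so a single
     // pass both filters the list and records what we are about to send.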
     peer.m_addrs_to_send.erase(std::remove_if(peer.m_addrs_to_send.begin(),
                                               peer.m_addrs_to_send.end(),
                                               addr_already_known),
                                peer.m_addrs_to_send.end());
 
     // No addr messages to send
     if (peer.m_addrs_to_send.empty()) {
         return;
     }
 
     const char *msg_type;
     int make_flags;
     if (peer.m_wants_addrv2) {
         msg_type = NetMsgType::ADDRV2;
         make_flags = ADDRV2_FORMAT;
     } else {
         msg_type = NetMsgType::ADDR;
         make_flags = 0;
     }
     m_connman.PushMessage(
         &node, CNetMsgMaker(node.GetCommonVersion())
                    .Make(make_flags, msg_type, peer.m_addrs_to_send));
     peer.m_addrs_to_send.clear();
 
     // we only send the big addr message once
     if (peer.m_addrs_to_send.capacity() > 40) {
         peer.m_addrs_to_send.shrink_to_fit();
     }
 }
 
 void PeerManagerImpl::MaybeSendSendHeaders(CNode &node, Peer &peer) {
     // Delay sending SENDHEADERS (BIP 130) until we're done with an
     // initial-headers-sync with this peer. Receiving headers announcements for
     // new blocks while trying to sync their headers chain is problematic,
     // because of the state tracking done.
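     // A peer whose best known block carries more work than our minimum
     // chainwork is treated as being past the initial-headers-sync phase.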
     if (!peer.m_sent_sendheaders &&
         node.GetCommonVersion() >= SENDHEADERS_VERSION) {
         LOCK(cs_main);
         CNodeState &state = *State(node.GetId());
         if (state.pindexBestKnownBlock != nullptr &&
             state.pindexBestKnownBlock->nChainWork >
                 m_chainman.MinimumChainWork()) {
             // Tell our peer we prefer to receive headers rather than invs.
             // We send this to non-NODE NETWORK peers as well, because even
             // non-NODE NETWORK peers can announce blocks (such as pruning
             // nodes).
             m_connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion())
                                              .Make(NetMsgType::SENDHEADERS));
             peer.m_sent_sendheaders = true;
         }
     }
 }
 
 void PeerManagerImpl::MaybeSendFeefilter(
     CNode &pto, Peer &peer, std::chrono::microseconds current_time) {
     if (m_ignore_incoming_txs) {
         return;
     }
     if (pto.GetCommonVersion() < FEEFILTER_VERSION) {
         return;
     }
     // peers with the forcerelay permission should not filter txs to us
     if (pto.HasPermission(NetPermissionFlags::ForceRelay)) {
         return;
     }
     // Don't send feefilter messages to outbound block-relay-only peers since
     // they should never announce transactions to us, regardless of feefilter
     // state.
     if (pto.IsBlockOnlyConn()) {
         return;
     }
 
     Amount currentFilter = m_mempool.GetMinFee().GetFeePerK();
     static FeeFilterRounder g_filter_rounder{
         CFeeRate{DEFAULT_MIN_RELAY_TX_FEE_PER_KB}};
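     // The static rounder quantizes fee rates into coarse buckets, which
     // avoids leaking our exact mempool minimum fee to peers.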
 
     if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
         // Received tx-inv messages are discarded when the active
         // chainstate is in IBD, so tell the peer to not send them.
         currentFilter = MAX_MONEY;
     } else {
         static const Amount MAX_FILTER{g_filter_rounder.round(MAX_MONEY)};
         if (peer.m_fee_filter_sent == MAX_FILTER) {
             // Send the current filter if we sent MAX_FILTER previously
             // and made it out of IBD.
             peer.m_next_send_feefilter = 0us;
         }
     }
     if (current_time > peer.m_next_send_feefilter) {
         Amount filterToSend = g_filter_rounder.round(currentFilter);
         // We always have a fee filter of at least the min relay fee
         filterToSend =
             std::max(filterToSend, m_mempool.m_min_relay_feerate.GetFeePerK());
         if (filterToSend != peer.m_fee_filter_sent) {
             m_connman.PushMessage(
                 &pto, CNetMsgMaker(pto.GetCommonVersion())
                           .Make(NetMsgType::FEEFILTER, filterToSend));
             peer.m_fee_filter_sent = filterToSend;
         }
         peer.m_next_send_feefilter =
             GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
     }
     // If the fee filter has changed substantially and it's still more than
     // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the
     // broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
     else if (current_time + MAX_FEEFILTER_CHANGE_DELAY <
                  peer.m_next_send_feefilter &&
              (currentFilter < 3 * peer.m_fee_filter_sent / 4 ||
               currentFilter > 4 * peer.m_fee_filter_sent / 3)) {
         peer.m_next_send_feefilter =
             current_time + GetRandomDuration<std::chrono::microseconds>(
                                MAX_FEEFILTER_CHANGE_DELAY);
     }
 }
 
 namespace {
 class CompareInvMempoolOrder {
     CTxMemPool *mp;
 
 public:
     explicit CompareInvMempoolOrder(CTxMemPool *_mempool) : mp(_mempool) {}
 
     bool operator()(std::set<TxId>::iterator a, std::set<TxId>::iterator b) {
         /**
          * As std::make_heap produces a max-heap, we want the entries which
          * are topologically earlier to sort later.
          */
         return mp->CompareTopologically(*b, *a);
     }
 };
 } // namespace
 
 bool PeerManagerImpl::SetupAddressRelay(const CNode &node, Peer &peer) {
     // We don't participate in addr relay with outbound block-relay-only
     // connections to prevent providing adversaries with the additional
     // information of addr traffic to infer the link.
     if (node.IsBlockOnlyConn()) {
         return false;
     }
 
     if (!peer.m_addr_relay_enabled.exchange(true)) {
         // First addr message we have received from the peer, initialize
         // m_addr_known
         peer.m_addr_known = std::make_unique<CRollingBloomFilter>(5000, 0.001);
     }
 
     return true;
 }
 
 bool PeerManagerImpl::SendMessages(const Config &config, CNode *pto) {
     AssertLockHeld(g_msgproc_mutex);
 
     PeerRef peer = GetPeerRef(pto->GetId());
     if (!peer) {
         return false;
     }
     const Consensus::Params &consensusParams = m_chainparams.GetConsensus();
 
     // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll
     // disconnect misbehaving peers even before the version handshake is
     // complete.
     if (MaybeDiscourageAndDisconnect(*pto, *peer)) {
         return true;
     }
 
     // Don't send anything until the version handshake is complete
     if (!pto->fSuccessfullyConnected || pto->fDisconnect) {
         return true;
     }
 
     // If we get here, the outgoing message serialization version is set and
     // can't change.
     const CNetMsgMaker msgMaker(pto->GetCommonVersion());
 
     const auto current_time{GetTime<std::chrono::microseconds>()};
 
     if (pto->IsAddrFetchConn() &&
         current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
         LogPrint(BCLog::NET,
                  "addrfetch connection timeout; disconnecting peer=%d\n",
                  pto->GetId());
         pto->fDisconnect = true;
         return true;
     }
 
     MaybeSendPing(*pto, *peer, current_time);
 
     // MaybeSendPing may have marked peer for disconnection
     if (pto->fDisconnect) {
         return true;
     }
 
     bool sync_blocks_and_headers_from_peer = false;
 
     MaybeSendAddr(*pto, *peer, current_time);
 
     MaybeSendSendHeaders(*pto, *peer);
 
     {
         LOCK(cs_main);
 
         CNodeState &state = *State(pto->GetId());
 
         // Start block sync
         if (m_chainman.m_best_header == nullptr) {
             m_chainman.m_best_header = m_chainman.ActiveChain().Tip();
         }
 
         // Determine whether we might try initial headers sync or parallel
         // block download from this peer -- this mostly affects behavior while
         // in IBD (once out of IBD, we sync from all peers).
         if (state.fPreferredDownload) {
             sync_blocks_and_headers_from_peer = true;
         } else if (CanServeBlocks(*peer) && !pto->IsAddrFetchConn()) {
             // Typically this is an inbound peer. If we don't have any outbound
             // peers, or if we aren't downloading any blocks from such peers,
             // then allow block downloads from this peer, too.
             // We prefer downloading blocks from outbound peers to avoid
             // putting undue load on (say) some home user who is just making
             // outbound connections to the network, but if our only source of
             // the latest blocks is from an inbound peer, we have to be sure to
             // eventually download it (and not just wait indefinitely for an
             // outbound peer to have it).
             if (m_num_preferred_download_peers == 0 ||
                 mapBlocksInFlight.empty()) {
                 sync_blocks_and_headers_from_peer = true;
             }
         }
 
         if (!state.fSyncStarted && CanServeBlocks(*peer) &&
             !m_chainman.m_blockman.LoadingBlocks()) {
             // Only actively request headers from a single peer, unless we're
             // close to today.
             if ((nSyncStarted == 0 && sync_blocks_and_headers_from_peer) ||
                 m_chainman.m_best_header->Time() > GetAdjustedTime() - 24h) {
                 const CBlockIndex *pindexStart = m_chainman.m_best_header;
                 /**
                  * If possible, start at the block preceding the currently best
                  * known header. This ensures that we always get a non-empty
                  * list of headers back as long as the peer is up-to-date. With
                  * a non-empty response, we can initialise the peer's known best
                  * block. This wouldn't be possible if we requested starting at
                  * m_best_header and got back an empty response.
                  */
                 if (pindexStart->pprev) {
                     pindexStart = pindexStart->pprev;
                 }
                 if (MaybeSendGetHeaders(*pto, GetLocator(pindexStart), *peer)) {
                     LogPrint(
                         BCLog::NET,
                         "initial getheaders (%d) to peer=%d (startheight:%d)\n",
                         pindexStart->nHeight, pto->GetId(),
                         peer->m_starting_height);
 
                     state.fSyncStarted = true;
                     peer->m_headers_sync_timeout =
                         current_time + HEADERS_DOWNLOAD_TIMEOUT_BASE +
                         (
                             // Convert HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER to
                             // microseconds before scaling to maintain precision
                             std::chrono::microseconds{
                                 HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER} *
                             Ticks<std::chrono::seconds>(
                                 GetAdjustedTime() -
                                 m_chainman.m_best_header->Time()) /
                             consensusParams.nPowTargetSpacing);
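                     // That is, a base timeout plus a per-header allowance
                     // scaled by how many blocks' worth of time our best
                     // header lags behind the adjusted clock.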
                     nSyncStarted++;
                 }
             }
         }
 
         //
         // Try sending block announcements via headers
         //
         {
             // If we have fewer than MAX_BLOCKS_TO_ANNOUNCE in our list of
             // block hashes we're relaying, and our peer wants headers
             // announcements, then find the first header not yet known to our
             // peer that would connect, and send all headers past it. If no
             // header would connect, or if we have too many blocks, or if the
             // peer doesn't want headers, just add all to the inv queue.
             LOCK(peer->m_block_inv_mutex);
             std::vector<CBlock> vHeaders;
             bool fRevertToInv =
                 ((!peer->m_prefers_headers &&
                   (!state.m_requested_hb_cmpctblocks ||
                    peer->m_blocks_for_headers_relay.size() > 1)) ||
                  peer->m_blocks_for_headers_relay.size() >
                      MAX_BLOCKS_TO_ANNOUNCE);
             // last header queued for delivery
             const CBlockIndex *pBestIndex = nullptr;
             // ensure pindexBestKnownBlock is up-to-date
             ProcessBlockAvailability(pto->GetId());
 
             if (!fRevertToInv) {
                 bool fFoundStartingHeader = false;
                 // Try to find the first header that our peer doesn't have, and
                 // then send all headers past that one. If we come across any
                 // headers that aren't on m_chainman.ActiveChain(), give up.
                 for (const BlockHash &hash : peer->m_blocks_for_headers_relay) {
                     const CBlockIndex *pindex =
                         m_chainman.m_blockman.LookupBlockIndex(hash);
                     assert(pindex);
                     if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
                         // Bail out if we reorged away from this block
                         fRevertToInv = true;
                         break;
                     }
                     if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
                         // This means that the list of blocks to announce
                         // doesn't connect. This shouldn't really be
                         // possible to hit during regular operation (because
                         // reorgs should take us to a chain that has some block
                         // not on the prior chain, which should be caught by the
                         // prior check), but one way this could happen is by
                         // using invalidateblock / reconsiderblock repeatedly on
                         // the tip, causing it to be added multiple times to
                         // m_blocks_for_headers_relay. Robustly deal with this
                         // rare situation by reverting to an inv.
                         fRevertToInv = true;
                         break;
                     }
                     pBestIndex = pindex;
                     if (fFoundStartingHeader) {
                         // add this to the headers message
                         vHeaders.push_back(pindex->GetBlockHeader());
                     } else if (PeerHasHeader(&state, pindex)) {
                         // Keep looking for the first new block.
                         continue;
                     } else if (pindex->pprev == nullptr ||
                                PeerHasHeader(&state, pindex->pprev)) {
                         // Peer doesn't have this header but they do have the
                         // prior one. Start sending headers.
                         fFoundStartingHeader = true;
                         vHeaders.push_back(pindex->GetBlockHeader());
                     } else {
                         // Peer doesn't have this header or the prior one --
                         // nothing will connect, so bail out.
                         fRevertToInv = true;
                         break;
                     }
                 }
             }
             if (!fRevertToInv && !vHeaders.empty()) {
                 if (vHeaders.size() == 1 && state.m_requested_hb_cmpctblocks) {
                     // We only send up to 1 block as header-and-ids; needing
                     // more probably means we're doing an initial-ish sync or
                     // they're slow.
                     LogPrint(BCLog::NET,
                              "%s sending header-and-ids %s to peer=%d\n",
                              __func__, vHeaders.front().GetHash().ToString(),
                              pto->GetId());
 
                     std::optional<CSerializedNetMsg> cached_cmpctblock_msg;
                     {
                         LOCK(m_most_recent_block_mutex);
                         if (m_most_recent_block_hash ==
                             pBestIndex->GetBlockHash()) {
                             cached_cmpctblock_msg =
                                 msgMaker.Make(NetMsgType::CMPCTBLOCK,
                                               *m_most_recent_compact_block);
                         }
                     }
                     if (cached_cmpctblock_msg.has_value()) {
                         m_connman.PushMessage(
                             pto, std::move(cached_cmpctblock_msg.value()));
                     } else {
                         CBlock block;
                         const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(
                             block, *pBestIndex)};
                         assert(ret);
                         CBlockHeaderAndShortTxIDs cmpctblock(block);
                         m_connman.PushMessage(
                             pto,
                             msgMaker.Make(NetMsgType::CMPCTBLOCK, cmpctblock));
                     }
                     state.pindexBestHeaderSent = pBestIndex;
                 } else if (peer->m_prefers_headers) {
                     if (vHeaders.size() > 1) {
                         LogPrint(BCLog::NET,
                                  "%s: %u headers, range (%s, %s), to peer=%d\n",
                                  __func__, vHeaders.size(),
                                  vHeaders.front().GetHash().ToString(),
                                  vHeaders.back().GetHash().ToString(),
                                  pto->GetId());
                     } else {
                         LogPrint(BCLog::NET,
                                  "%s: sending header %s to peer=%d\n", __func__,
                                  vHeaders.front().GetHash().ToString(),
                                  pto->GetId());
                     }
                     m_connman.PushMessage(
                         pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
                     state.pindexBestHeaderSent = pBestIndex;
                 } else {
                     fRevertToInv = true;
                 }
             }
             if (fRevertToInv) {
                 // If falling back to using an inv, just try to inv the tip. The
                 // last entry in m_blocks_for_headers_relay was our tip at some
                 // point in the past.
                 if (!peer->m_blocks_for_headers_relay.empty()) {
                     const BlockHash &hashToAnnounce =
                         peer->m_blocks_for_headers_relay.back();
                     const CBlockIndex *pindex =
                         m_chainman.m_blockman.LookupBlockIndex(hashToAnnounce);
                     assert(pindex);
 
                     // Warn if we're announcing a block that is not on the main
                     // chain. This should be very rare and could be optimized
                     // out. Just log for now.
                     if (m_chainman.ActiveChain()[pindex->nHeight] != pindex) {
                         LogPrint(
                             BCLog::NET,
                             "Announcing block %s not on main chain (tip=%s)\n",
                             hashToAnnounce.ToString(),
                             m_chainman.ActiveChain()
                                 .Tip()
                                 ->GetBlockHash()
                                 .ToString());
                     }
 
                     // If the peer's chain has this block, don't inv it back.
                     if (!PeerHasHeader(&state, pindex)) {
                         peer->m_blocks_for_inv_relay.push_back(hashToAnnounce);
                         LogPrint(BCLog::NET,
                                  "%s: sending inv peer=%d hash=%s\n", __func__,
                                  pto->GetId(), hashToAnnounce.ToString());
                     }
                 }
             }
             peer->m_blocks_for_headers_relay.clear();
         }
     } // release cs_main
 
     //
     // Message: inventory
     //
     std::vector<CInv> vInv;
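     // Queue an inv entry, flushing a full INV message whenever the batch
     // reaches MAX_INV_SZ so no single message exceeds the protocol limit.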
     auto addInvAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
         vInv.emplace_back(type, hash);
         if (vInv.size() == MAX_INV_SZ) {
             m_connman.PushMessage(
                 pto, msgMaker.Make(NetMsgType::INV, std::move(vInv)));
             vInv.clear();
         }
     };
 
     {
         LOCK(cs_main);
 
         {
             LOCK(peer->m_block_inv_mutex);
 
             vInv.reserve(std::max<size_t>(peer->m_blocks_for_inv_relay.size(),
                                           INVENTORY_BROADCAST_MAX_PER_MB *
                                               config.GetMaxBlockSize() /
                                               1000000));
 
             // Add blocks
             for (const BlockHash &hash : peer->m_blocks_for_inv_relay) {
                 addInvAndMaybeFlush(MSG_BLOCK, hash);
             }
             peer->m_blocks_for_inv_relay.clear();
         }
 
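         // Returns true when it is time to trickle inventory to this peer
         // (peers with NoBan permission are always eligible) and, when the
         // timer has lapsed, schedules the next send: inbound peers share a
         // randomized schedule, outbound peers get no extra delay.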
         auto computeNextInvSendTime =
             [&](std::chrono::microseconds &next) -> bool {
             bool fSendTrickle = pto->HasPermission(NetPermissionFlags::NoBan);
 
             if (next < current_time) {
                 fSendTrickle = true;
                 if (pto->IsInboundConn()) {
                     next = NextInvToInbounds(
                         current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
                 } else {
                     // Skip delay for outbound peers, as there is less privacy
                     // concern for them.
                     next = current_time;
                 }
             }
 
             return fSendTrickle;
         };
 
         // Add proofs to inventory
         if (peer->m_proof_relay != nullptr) {
             LOCK(peer->m_proof_relay->m_proof_inventory_mutex);
 
             if (computeNextInvSendTime(
                     peer->m_proof_relay->m_next_inv_send_time)) {
                 auto it =
                     peer->m_proof_relay->m_proof_inventory_to_send.begin();
                 while (it !=
                        peer->m_proof_relay->m_proof_inventory_to_send.end()) {
                     const avalanche::ProofId proofid = *it;
 
                     it = peer->m_proof_relay->m_proof_inventory_to_send.erase(
                         it);
 
                     if (peer->m_proof_relay->m_proof_inventory_known_filter
                             .contains(proofid)) {
                         continue;
                     }
 
                     peer->m_proof_relay->m_proof_inventory_known_filter.insert(
                         proofid);
                     addInvAndMaybeFlush(MSG_AVA_PROOF, proofid);
                     peer->m_proof_relay->m_recently_announced_proofs.insert(
                         proofid);
                 }
             }
         }
 
         if (auto tx_relay = peer->GetTxRelay()) {
             LOCK(tx_relay->m_tx_inventory_mutex);
             // Check whether periodic sends should happen
             const bool fSendTrickle =
                 computeNextInvSendTime(tx_relay->m_next_inv_send_time);
 
             // If it's time to send but the peer has requested that we not
             // relay transactions, drop everything queued for announcement.
             if (fSendTrickle) {
                 LOCK(tx_relay->m_bloom_filter_mutex);
                 if (!tx_relay->m_relay_txs) {
                     tx_relay->m_tx_inventory_to_send.clear();
                 }
             }
 
             // Respond to BIP35 mempool requests
             if (fSendTrickle && tx_relay->m_send_mempool) {
                 auto vtxinfo = m_mempool.infoAll();
                 tx_relay->m_send_mempool = false;
                 const CFeeRate filterrate{
                     tx_relay->m_fee_filter_received.load()};
 
                 LOCK(tx_relay->m_bloom_filter_mutex);
 
                 for (const auto &txinfo : vtxinfo) {
                     const TxId &txid = txinfo.tx->GetId();
                     tx_relay->m_tx_inventory_to_send.erase(txid);
                     // Don't send transactions that peers will not put into
                     // their mempool
                     if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
                         continue;
                     }
                     if (tx_relay->m_bloom_filter &&
                         !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
                             *txinfo.tx)) {
                         continue;
                     }
                     tx_relay->m_tx_inventory_known_filter.insert(txid);
                     // Responses to MEMPOOL requests bypass the
                     // m_recently_announced_invs filter.
                     addInvAndMaybeFlush(MSG_TX, txid);
                 }
                 tx_relay->m_last_mempool_req =
                     std::chrono::duration_cast<std::chrono::seconds>(
                         current_time);
             }
 
             // Determine transactions to relay
             if (fSendTrickle) {
                 // Produce a vector with all candidates for sending
                 std::vector<std::set<TxId>::iterator> vInvTx;
                 vInvTx.reserve(tx_relay->m_tx_inventory_to_send.size());
                 for (std::set<TxId>::iterator it =
                          tx_relay->m_tx_inventory_to_send.begin();
                      it != tx_relay->m_tx_inventory_to_send.end(); it++) {
                     vInvTx.push_back(it);
                 }
                 const CFeeRate filterrate{
                     tx_relay->m_fee_filter_received.load()};
                 // Send out the inventory in the order of admission to our
                 // mempool, which is guaranteed to be a topological sort order.
                 // A heap is used so that not all items need sorting if only a
                 // few are being sent.
                 CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool);
                 std::make_heap(vInvTx.begin(), vInvTx.end(),
                                compareInvMempoolOrder);
                 // No reason to drain out at many times the network's
                 // capacity, especially since we have many peers and some
                 // will draw much shorter delays.
                 unsigned int nRelayedTransactions = 0;
                 LOCK(tx_relay->m_bloom_filter_mutex);
                 while (!vInvTx.empty() &&
                        nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB *
                                                   config.GetMaxBlockSize() /
                                                   1000000) {
                     // Fetch the top element from the heap
                     std::pop_heap(vInvTx.begin(), vInvTx.end(),
                                   compareInvMempoolOrder);
                     std::set<TxId>::iterator it = vInvTx.back();
                     vInvTx.pop_back();
                     const TxId txid = *it;
                     // Remove it from the to-be-sent set
                     tx_relay->m_tx_inventory_to_send.erase(it);
                     // Check if not in the filter already
                     if (tx_relay->m_tx_inventory_known_filter.contains(txid)) {
                         continue;
                     }
                     // Not in the mempool anymore? Don't bother sending it.
                     auto txinfo = m_mempool.info(txid);
                     if (!txinfo.tx) {
                         continue;
                     }
                     // Peer told us not to send transactions below its
                     // feefilter rate? Don't bother sending it.
                     if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) {
                         continue;
                     }
                     if (tx_relay->m_bloom_filter &&
                         !tx_relay->m_bloom_filter->IsRelevantAndUpdate(
                             *txinfo.tx)) {
                         continue;
                     }
                     // Send
                     tx_relay->m_recently_announced_invs.insert(txid);
                     addInvAndMaybeFlush(MSG_TX, txid);
                     nRelayedTransactions++;
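                     // Keep the transaction retrievable via getdata for a
                     // while even if it is later evicted from the mempool:
                     // mapRelay entries persist for RELAY_TX_CACHE_TIME.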
                     {
                         // Expire old relay messages
                         while (!g_relay_expiration.empty() &&
                                g_relay_expiration.front().first <
                                    current_time) {
                             mapRelay.erase(g_relay_expiration.front().second);
                             g_relay_expiration.pop_front();
                         }
 
                         auto ret = mapRelay.insert(
                             std::make_pair(txid, std::move(txinfo.tx)));
                         if (ret.second) {
                             g_relay_expiration.push_back(std::make_pair(
                                 current_time + RELAY_TX_CACHE_TIME, ret.first));
                         }
                     }
                     tx_relay->m_tx_inventory_known_filter.insert(txid);
                 }
             }
         }
     } // release cs_main
 
     if (!vInv.empty()) {
         m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
     }
 
     {
         LOCK(cs_main);
 
         CNodeState &state = *State(pto->GetId());
 
         // Detect whether we're stalling
         auto stalling_timeout = m_block_stalling_timeout.load();
         if (state.m_stalling_since.count() &&
             state.m_stalling_since < current_time - stalling_timeout) {
             // Stalling only triggers when the block download window cannot
             // move. During normal steady state, the download window should be
             // much larger than the to-be-downloaded set of blocks, so
             // disconnection should only happen during initial block download.
             LogPrintf("Peer=%d is stalling block download, disconnecting\n",
                       pto->GetId());
             pto->fDisconnect = true;
             // Increase timeout for the next peer so that we don't disconnect
             // multiple peers if our own bandwidth is insufficient.
             const auto new_timeout =
                 std::min(2 * stalling_timeout, BLOCK_STALLING_TIMEOUT_MAX);
             if (stalling_timeout != new_timeout &&
                 m_block_stalling_timeout.compare_exchange_strong(
                     stalling_timeout, new_timeout)) {
                 LogPrint(
                     BCLog::NET,
                     "Increased stalling timeout temporarily to %d seconds\n",
                     count_seconds(new_timeout));
             }
             return true;
         }
         // In case there is a block that has been in flight from this peer for
         // block_interval * (1 + 0.5 * N) (with N the number of peers from which
         // we're downloading validated blocks), disconnect due to timeout.
         // We compensate for other peers to prevent killing off peers due to our
         // own downstream link being saturated. We only count validated
         // in-flight blocks so peers can't advertise non-existing block hashes
         // to unreasonably increase our timeout.
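         // For example, with two other peers downloading validated blocks
         // (N = 2), the timeout is two full block intervals.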
         if (state.vBlocksInFlight.size() > 0) {
             QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
             int nOtherPeersWithValidatedDownloads =
                 m_peers_downloading_from - 1;
             if (current_time >
                 state.m_downloading_since +
                     std::chrono::seconds{consensusParams.nPowTargetSpacing} *
                         (BLOCK_DOWNLOAD_TIMEOUT_BASE +
                          BLOCK_DOWNLOAD_TIMEOUT_PER_PEER *
                              nOtherPeersWithValidatedDownloads)) {
                 LogPrintf("Timeout downloading block %s from peer=%d, "
                           "disconnecting\n",
                           queuedBlock.pindex->GetBlockHash().ToString(),
                           pto->GetId());
                 pto->fDisconnect = true;
                 return true;
             }
         }
 
         // Check for headers sync timeouts
         if (state.fSyncStarted &&
             peer->m_headers_sync_timeout < std::chrono::microseconds::max()) {
             // Detect whether this is a stalling initial-headers-sync peer
             if (m_chainman.m_best_header->Time() <= GetAdjustedTime() - 24h) {
                 if (current_time > peer->m_headers_sync_timeout &&
                     nSyncStarted == 1 &&
                     (m_num_preferred_download_peers -
                          state.fPreferredDownload >=
                      1)) {
                     // Disconnect a peer (without NetPermissionFlags::NoBan
                     // permission) if it is our only sync peer, and we have
                     // others we could be using instead. Note: If all our peers
                     // are inbound, then we won't disconnect our sync peer for
                     // stalling; we have bigger problems if we can't get any
                     // outbound peers.
                     if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
                         LogPrintf("Timeout downloading headers from peer=%d, "
                                   "disconnecting\n",
                                   pto->GetId());
                         pto->fDisconnect = true;
                         return true;
                     } else {
                         LogPrintf("Timeout downloading headers from noban "
                                   "peer=%d, not disconnecting\n",
                                   pto->GetId());
                         // Reset the headers sync state so that we have a chance
                         // to try downloading from a different peer. Note: this
                         // will also result in at least one more getheaders
                         // message to be sent to this peer (eventually).
                         state.fSyncStarted = false;
                         nSyncStarted--;
                         peer->m_headers_sync_timeout = 0us;
                     }
                 }
             } else {
                 // After we've caught up once, reset the timeout so we can't
                 // trigger disconnect later.
                 peer->m_headers_sync_timeout = std::chrono::microseconds::max();
             }
         }
 
         // Check that outbound peers have reasonable chains. GetTime() is
         // used by this anti-DoS logic so that we can test it using mocktime.
         ConsiderEviction(*pto, *peer, GetTime<std::chrono::seconds>());
     } // release cs_main
 
     std::vector<CInv> vGetData;
 
     //
     // Message: getdata (blocks)
     //
     {
         LOCK(cs_main);
 
         CNodeState &state = *State(pto->GetId());
 
         if (CanServeBlocks(*peer) &&
             ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) ||
              !m_chainman.ActiveChainstate().IsInitialBlockDownload()) &&
             state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
             std::vector<const CBlockIndex *> vToDownload;
             NodeId staller = -1;
             FindNextBlocksToDownload(pto->GetId(),
                                      MAX_BLOCKS_IN_TRANSIT_PER_PEER -
                                          state.nBlocksInFlight,
                                      vToDownload, staller);
             for (const CBlockIndex *pindex : vToDownload) {
                 vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash()));
                 BlockRequested(config, pto->GetId(), *pindex);
                 LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n",
                          pindex->GetBlockHash().ToString(), pindex->nHeight,
                          pto->GetId());
             }
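             // If nothing could be requested from this peer and another peer
             // is blocking the block download window, start that peer's
             // stalling timer; the stalling check earlier in this function
             // will disconnect it once the timeout expires.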
             if (state.nBlocksInFlight == 0 && staller != -1) {
                 if (State(staller)->m_stalling_since == 0us) {
                     State(staller)->m_stalling_since = current_time;
                     LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
                 }
             }
         }
     } // release cs_main
 
     auto addGetDataAndMaybeFlush = [&](uint32_t type, const uint256 &hash) {
         CInv inv(type, hash);
         LogPrint(BCLog::NET, "Requesting %s from peer=%d\n", inv.ToString(),
                  pto->GetId());
         vGetData.push_back(std::move(inv));
         if (vGetData.size() >= MAX_GETDATA_SZ) {
             m_connman.PushMessage(
                 pto, msgMaker.Make(NetMsgType::GETDATA, std::move(vGetData)));
             vGetData.clear();
         }
     };
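     // Invs are batched into a single getdata message and flushed whenever
     // the batch reaches MAX_GETDATA_SZ; any remainder is flushed after the
     // transaction loop below.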
 
     //
     // Message: getdata (proof)
     //
     {
         LOCK(cs_proofrequest);
         std::vector<std::pair<NodeId, avalanche::ProofId>> expired;
         auto requestable =
             m_proofrequest.GetRequestable(pto->GetId(), current_time, &expired);
         for (const auto &entry : expired) {
             LogPrint(BCLog::AVALANCHE,
                      "timeout of inflight proof %s from peer=%d\n",
                      entry.second.ToString(), entry.first);
         }
         for (const auto &proofid : requestable) {
             if (!AlreadyHaveProof(proofid)) {
                 addGetDataAndMaybeFlush(MSG_AVA_PROOF, proofid);
                 m_proofrequest.RequestedData(
                     pto->GetId(), proofid,
                     current_time + PROOF_REQUEST_PARAMS.getdata_interval);
             } else {
                 // We have already seen this proof, no need to download.
                 // This is just a belt-and-suspenders check, as ForgetInvId
                 // should already have been called once the proof became
                 // known (i.e. AlreadyHaveProof() returns true).
                 m_proofrequest.ForgetInvId(proofid);
             }
         }
     } // release cs_proofrequest
 
     //
     // Message: getdata (transactions)
     //
     {
         LOCK(cs_main);
         std::vector<std::pair<NodeId, TxId>> expired;
         auto requestable =
             m_txrequest.GetRequestable(pto->GetId(), current_time, &expired);
         for (const auto &entry : expired) {
             LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n",
                      entry.second.ToString(), entry.first);
         }
         for (const TxId &txid : requestable) {
             if (!AlreadyHaveTx(txid)) {
                 addGetDataAndMaybeFlush(MSG_TX, txid);
                 m_txrequest.RequestedData(
                     pto->GetId(), txid,
                     current_time + TX_REQUEST_PARAMS.getdata_interval);
             } else {
                 // We have already seen this transaction, no need to download.
                 // This is just a belt-and-suspenders check, as ForgetInvId
                 // should already have been called once the transaction became
                 // known (i.e. AlreadyHaveTx() returns true).
                 m_txrequest.ForgetInvId(txid);
             }
         }
 
         if (!vGetData.empty()) {
             m_connman.PushMessage(pto,
                                   msgMaker.Make(NetMsgType::GETDATA, vGetData));
         }
 
     } // release cs_main
     MaybeSendFeefilter(*pto, *peer, current_time);
     return true;
 }
 
 bool PeerManagerImpl::ReceivedAvalancheProof(CNode &node, Peer &peer,
                                              const avalanche::ProofRef &proof) {
     assert(proof != nullptr);
 
     const avalanche::ProofId &proofid = proof->getId();
 
     AddKnownProof(peer, proofid);
 
     if (m_chainman.ActiveChainstate().IsInitialBlockDownload()) {
         // We cannot reliably verify proofs during IBD, so bail out early and
         // keep the inventory as pending so it can be requested when the node
         // has synced.
         return true;
     }
 
     const NodeId nodeid = node.GetId();
 
     const bool isStaker = WITH_LOCK(node.cs_avalanche_pubkey,
                                     return node.m_avalanche_pubkey.has_value());
-    auto saveProofIfStaker = [isStaker](const CNode &node,
-                                        const avalanche::ProofId &proofid,
-                                        const NodeId nodeid) -> bool {
+    auto saveProofIfStaker = [this, isStaker](const CNode &node,
+                                              const avalanche::ProofId &proofid,
+                                              const NodeId nodeid) -> bool {
         if (isStaker) {
-            return g_avalanche->withPeerManager(
+            return m_avalanche->withPeerManager(
                 [&](avalanche::PeerManager &pm) {
                     return pm.saveRemoteProof(proofid, nodeid, true);
                 });
         }
 
         return false;
     };
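     // saveProofIfStaker is deliberately a no-op for peers that did not
     // advertise an avalanche public key; remote proofs are only recorded
     // for staker nodes.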
 
     {
         LOCK(cs_proofrequest);
         m_proofrequest.ReceivedResponse(nodeid, proofid);
 
         if (AlreadyHaveProof(proofid)) {
             m_proofrequest.ForgetInvId(proofid);
             saveProofIfStaker(node, proofid, nodeid);
             return true;
         }
     }
 
     // registerProof should not be called while cs_proofrequest is held,
     // because it grabs cs_main and that creates a potential deadlock during
     // shutdown.
 
     avalanche::ProofRegistrationState state;
-    if (g_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
+    if (m_avalanche->withPeerManager([&](avalanche::PeerManager &pm) {
             return pm.registerProof(proof, state);
         })) {
         WITH_LOCK(cs_proofrequest, m_proofrequest.ForgetInvId(proofid));
         RelayProof(proofid);
 
         node.m_last_proof_time = GetTime<std::chrono::seconds>();
 
         LogPrint(BCLog::NET, "New avalanche proof: peer=%d, proofid %s\n",
                  nodeid, proofid.ToString());
     }
 
     if (state.GetResult() == avalanche::ProofRegistrationResult::INVALID) {
-        g_avalanche->withPeerManager(
+        m_avalanche->withPeerManager(
             [&](avalanche::PeerManager &pm) { pm.setInvalid(proofid); });
         Misbehaving(peer, 100, state.GetRejectReason());
         return false;
     }
 
     if (state.GetResult() == avalanche::ProofRegistrationResult::MISSING_UTXO) {
         // It is possible that the proof contains a utxo we don't know about
         // yet, so don't ban for this.
         return false;
     }
 
-    if (!g_avalanche->reconcileOrFinalize(proof)) {
+    if (!m_avalanche->reconcileOrFinalize(proof)) {
         LogPrint(BCLog::AVALANCHE,
                  "Not polling the avalanche proof (%s): peer=%d, proofid %s\n",
                  state.IsValid() ? "not-worth-polling"
                                  : state.GetRejectReason(),
                  nodeid, proofid.ToString());
     }
 
     saveProofIfStaker(node, proofid, nodeid);
 
     return true;
 }
diff --git a/src/net_processing.h b/src/net_processing.h
index 175bc685c0..686b878256 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -1,122 +1,122 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_NET_PROCESSING_H
 #define BITCOIN_NET_PROCESSING_H
 
 #include <net.h>
 #include <sync.h>
 #include <validationinterface.h>
 
 namespace avalanche {
 struct ProofId;
-}
+class Processor;
+} // namespace avalanche
 
 class AddrMan;
 class CTxMemPool;
 class ChainstateManager;
 class Config;
 
 /**
  * Default for -maxorphantx, maximum number of orphan transactions kept in
  * memory.
  */
 static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100;
 /**
  * Default number of orphan+recently-replaced txn to keep around for block
  * reconstruction.
  */
 static const unsigned int DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN = 100;
 static const bool DEFAULT_PEERBLOCKFILTERS = false;
 /** Threshold for marking a node to be discouraged, e.g. disconnected and added
  * to the discouragement filter. */
 static const int DISCOURAGEMENT_THRESHOLD{100};
 
 struct CNodeStateStats {
     int nSyncHeight = -1;
     int nCommonHeight = -1;
     int m_starting_height = -1;
     std::chrono::microseconds m_ping_wait;
     std::vector<int> vHeightInFlight;
     bool m_relay_txs;
     Amount m_fee_filter_received;
     uint64_t m_addr_processed = 0;
     uint64_t m_addr_rate_limited = 0;
     bool m_addr_relay_enabled{false};
     ServiceFlags their_services;
     int64_t presync_height{-1};
 };
 
 class PeerManager : public CValidationInterface, public NetEventsInterface {
 public:
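     /**
      * Note: the avalanche processor is passed as a raw pointer and may be
      * null (e.g. in tests, or when avalanche is disabled), so
      * implementations must not assume it is set.
      */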
-    static std::unique_ptr<PeerManager> make(CConnman &connman,
-                                             AddrMan &addrman, BanMan *banman,
-                                             ChainstateManager &chainman,
-                                             CTxMemPool &pool,
-                                             bool ignore_incoming_txs);
+    static std::unique_ptr<PeerManager>
+    make(CConnman &connman, AddrMan &addrman, BanMan *banman,
+         ChainstateManager &chainman, CTxMemPool &pool,
+         avalanche::Processor *const avalanche, bool ignore_incoming_txs);
     virtual ~PeerManager() {}
 
     /**
      * Attempt to manually fetch block from a given peer. We must already have
      * the header.
      *
      * @param[in]  config       The global config
      * @param[in]  peer_id      The peer id
      * @param[in]  block_index  The block index
      * @returns std::nullopt if a request was successfully made, otherwise an
      *     error message
      */
     virtual std::optional<std::string>
     FetchBlock(const Config &config, NodeId peer_id,
                const CBlockIndex &block_index) = 0;
 
     /** Begin running background tasks, should only be called once */
     virtual void StartScheduledTasks(CScheduler &scheduler) = 0;
 
     /** Get statistics from node state */
     virtual bool GetNodeStateStats(NodeId nodeid,
                                    CNodeStateStats &stats) const = 0;
 
     /** Whether this node ignores txs received over p2p. */
     virtual bool IgnoresIncomingTxs() = 0;
 
     /** Relay transaction to all peers. */
     virtual void RelayTransaction(const TxId &txid) = 0;
 
     /** Relay proof to all peers */
     virtual void RelayProof(const avalanche::ProofId &proofid) = 0;
 
     /** Send ping message to all peers */
     virtual void SendPings() = 0;
 
     /** Set the best height */
     virtual void SetBestHeight(int height) = 0;
 
     /** Public for unit testing. */
     virtual void UnitTestMisbehaving(const NodeId peer_id,
                                      const int howmuch) = 0;
 
     /**
      * Evict extra outbound peers. If we think our tip may be stale, connect to
      * an extra outbound.
      */
     virtual void CheckForStaleTipAndEvictPeers() = 0;
 
     /** Process a single message from a peer. Public for fuzz testing */
     virtual void ProcessMessage(const Config &config, CNode &pfrom,
                                 const std::string &msg_type, CDataStream &vRecv,
                                 const std::chrono::microseconds time_received,
                                 const std::atomic<bool> &interruptMsgProc)
         EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex) = 0;
 
     /**
      * This function is used for testing the stale tip eviction logic, see
      * denialofservice_tests.cpp
      */
     virtual void UpdateLastBlockAnnounceTime(NodeId node,
                                              int64_t time_in_seconds) = 0;
 };
 
 #endif // BITCOIN_NET_PROCESSING_H
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index 6200acdc26..ae78733ffa 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -1,324 +1,324 @@
 // Copyright (c) 2011-2019 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 // Unit tests for denial-of-service detection/prevention code
 
 #include <banman.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <common/args.h>
 #include <config.h>
 #include <net.h>
 #include <net_processing.h>
 #include <script/sign.h>
 #include <script/signingprovider.h>
 #include <script/standard.h>
 #include <serialize.h>
 #include <timedata.h>
 #include <util/time.h>
 #include <validation.h>
 
 #include <test/util/net.h>
 #include <test/util/random.h>
 #include <test/util/setup_common.h>
 
 #include <boost/test/unit_test.hpp>
 
 #include <cstdint>
 
 namespace {
 struct CConnmanTest : public CConnman {
     using CConnman::CConnman;
     void AddNode(CNode &node) {
         LOCK(m_nodes_mutex);
         m_nodes.push_back(&node);
     }
     void ClearNodes() {
         LOCK(m_nodes_mutex);
         for (CNode *node : m_nodes) {
             delete node;
         }
         m_nodes.clear();
     }
 };
 } // namespace
 
 static CService ip(uint32_t i) {
     struct in_addr s;
     s.s_addr = i;
     return CService(CNetAddr(s), Params().GetDefaultPort());
 }
 
 static NodeId id = 0;
 
 BOOST_FIXTURE_TEST_SUITE(denialofservice_tests, TestingSetup)
 
 // Test eviction of an outbound peer whose chain never advances.
 // Mock a node connection, and use mocktime to simulate a peer which never
 // sends any headers messages. PeerLogic should decide to evict that outbound
 // peer after the appropriate timeouts.
 // Note that we protect 4 outbound nodes from being subject to this logic; this
 // test takes advantage of that protection only being applied to nodes which
 // send headers with sufficient work.
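 // The eviction logic gives the peer roughly 20 minutes to announce a chain
 // with sufficient work before probing it with a getheaders, plus a short
 // grace period to respond, which is why the test advances mocktime to 21
 // and then 24 minutes below.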
 BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) {
     LOCK(NetEventsInterface::g_msgproc_mutex);
 
     const Config &config = m_node.chainman->GetConfig();
 
     ConnmanTestMsg &connman = static_cast<ConnmanTestMsg &>(*m_node.connman);
     // Disable inactivity checks for this test to avoid interference
     connman.SetPeerConnectTimeout(99999s);
     PeerManager &peerman = *m_node.peerman;
 
     // Mock an outbound peer
     CAddress addr1(ip(0xa0b0c001), NODE_NONE);
     CNode dummyNode1(id++, INVALID_SOCKET, addr1,
                      /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0,
                      /* nLocalExtraEntropyIn */ 0, CAddress(), /* pszDest */ "",
                      ConnectionType::OUTBOUND_FULL_RELAY,
                      /* inbound_onion */ false);
 
     connman.Handshake(
         /*node=*/dummyNode1,
         /*successfully_connected=*/true,
         /*remote_services=*/ServiceFlags(NODE_NETWORK),
         /*local_services=*/ServiceFlags(NODE_NETWORK),
         /*permission_flags=*/NetPermissionFlags::None,
         /*version=*/PROTOCOL_VERSION,
         /*relay_txs=*/true);
     TestOnlyResetTimeData();
 
     // This test requires that we have a chain with non-zero work.
     {
         LOCK(cs_main);
         BOOST_CHECK(m_node.chainman->ActiveTip() != nullptr);
         BOOST_CHECK(m_node.chainman->ActiveTip()->nChainWork > 0);
     }
 
     // Test starts here
     // should result in getheaders
     BOOST_CHECK(peerman.SendMessages(config, &dummyNode1));
     {
         LOCK(dummyNode1.cs_vSend);
         BOOST_CHECK(dummyNode1.vSendMsg.size() > 0);
         dummyNode1.vSendMsg.clear();
     }
 
     int64_t nStartTime = GetTime();
     // Wait 21 minutes
     SetMockTime(nStartTime + 21 * 60);
     // should result in getheaders
     BOOST_CHECK(peerman.SendMessages(config, &dummyNode1));
 
     {
         LOCK(dummyNode1.cs_vSend);
         BOOST_CHECK(dummyNode1.vSendMsg.size() > 0);
     }
     // Wait 3 more minutes
     SetMockTime(nStartTime + 24 * 60);
     // should result in disconnect
     BOOST_CHECK(peerman.SendMessages(config, &dummyNode1));
     BOOST_CHECK(dummyNode1.fDisconnect == true);
     SetMockTime(0);
 
     peerman.FinalizeNode(config, dummyNode1);
 }
 
 static void AddRandomOutboundPeer(const Config &config,
                                   std::vector<CNode *> &vNodes,
                                   PeerManager &peerLogic,
                                   CConnmanTest *connman) {
     CAddress addr(ip(g_insecure_rand_ctx.randbits(32)), NODE_NONE);
     vNodes.emplace_back(new CNode(id++, INVALID_SOCKET, addr,
                                   /* nKeyedNetGroupIn */ 0,
                                   /* nLocalHostNonceIn */ 0,
                                   /* nLocalExtraEntropyIn */ 0, CAddress(),
                                   /* pszDest */ "",
                                   ConnectionType::OUTBOUND_FULL_RELAY,
                                   /* inbound_onion */ false));
     CNode &node = *vNodes.back();
     node.SetCommonVersion(PROTOCOL_VERSION);
 
     peerLogic.InitializeNode(config, node, ServiceFlags(NODE_NETWORK));
     node.fSuccessfullyConnected = true;
 
     connman->AddNode(node);
 }
 
 BOOST_AUTO_TEST_CASE(stale_tip_peer_management) {
     const Config &config = m_node.chainman->GetConfig();
 
     auto connman =
         std::make_unique<CConnmanTest>(config, 0x1337, 0x1337, *m_node.addrman);
     auto peerLogic =
         PeerManager::make(*connman, *m_node.addrman, nullptr, *m_node.chainman,
-                          *m_node.mempool, false);
+                          *m_node.mempool, /*avalanche=*/nullptr, false);
 
     const Consensus::Params &consensusParams =
         config.GetChainParams().GetConsensus();
     constexpr int max_outbound_full_relay = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS;
     CConnman::Options options;
     options.nMaxConnections = DEFAULT_MAX_PEER_CONNECTIONS;
     options.m_max_outbound_full_relay = max_outbound_full_relay;
     options.nMaxFeeler = MAX_FEELER_CONNECTIONS;
 
     const auto time_init{GetTime<std::chrono::seconds>()};
     SetMockTime(time_init.count());
     const auto time_later{
         time_init +
         3 * std::chrono::seconds{consensusParams.nPowTargetSpacing} + 1s};
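     // A tip is considered potentially stale once it has not advanced for
     // more than three block intervals, so time_later lands just past that
     // threshold.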
     connman->Init(options);
     std::vector<CNode *> vNodes;
 
     // Mock some outbound peers
     for (int i = 0; i < max_outbound_full_relay; ++i) {
         AddRandomOutboundPeer(config, vNodes, *peerLogic, connman.get());
     }
 
     peerLogic->CheckForStaleTipAndEvictPeers();
 
     // No nodes should be marked for disconnection while we have no extra peers
     for (const CNode *node : vNodes) {
         BOOST_CHECK(node->fDisconnect == false);
     }
 
     SetMockTime(time_later.count());
 
     // Now tip should definitely be stale, and we should look for an extra
     // outbound peer
     peerLogic->CheckForStaleTipAndEvictPeers();
     BOOST_CHECK(connman->GetTryNewOutboundPeer());
 
     // Still no peers should be marked for disconnection
     for (const CNode *node : vNodes) {
         BOOST_CHECK(node->fDisconnect == false);
     }
 
     // If we add one more peer, something should get marked for eviction
     // on the next check (since we're mocking the time to be in the future, the
     // required time connected check should be satisfied).
     SetMockTime(time_init.count());
     AddRandomOutboundPeer(config, vNodes, *peerLogic, connman.get());
     SetMockTime(time_later.count());
 
     peerLogic->CheckForStaleTipAndEvictPeers();
     for (int i = 0; i < max_outbound_full_relay; ++i) {
         BOOST_CHECK(vNodes[i]->fDisconnect == false);
     }
     // Last added node should get marked for eviction
     BOOST_CHECK(vNodes.back()->fDisconnect == true);
 
     vNodes.back()->fDisconnect = false;
 
     // Update the last announced block time for the last
     // peer, and check that the next newest node gets evicted.
     peerLogic->UpdateLastBlockAnnounceTime(vNodes.back()->GetId(), GetTime());
 
     peerLogic->CheckForStaleTipAndEvictPeers();
     for (int i = 0; i < max_outbound_full_relay - 1; ++i) {
         BOOST_CHECK(vNodes[i]->fDisconnect == false);
     }
     BOOST_CHECK(vNodes[max_outbound_full_relay - 1]->fDisconnect == true);
     BOOST_CHECK(vNodes.back()->fDisconnect == false);
 
     for (const CNode *node : vNodes) {
         peerLogic->FinalizeNode(config, *node);
     }
 
     connman->ClearNodes();
 }
 
 BOOST_AUTO_TEST_CASE(peer_discouragement) {
     LOCK(NetEventsInterface::g_msgproc_mutex);
 
     const Config &config = m_node.chainman->GetConfig();
 
     auto banman = std::make_unique<BanMan>(
         m_args.GetDataDirBase() / "banlist.dat", config.GetChainParams(),
         nullptr, DEFAULT_MISBEHAVING_BANTIME);
     auto connman =
         std::make_unique<CConnman>(config, 0x1337, 0x1337, *m_node.addrman);
-    auto peerLogic =
-        PeerManager::make(*connman, *m_node.addrman, banman.get(),
-                          *m_node.chainman, *m_node.mempool, false);
+    auto peerLogic = PeerManager::make(*connman, *m_node.addrman, banman.get(),
+                                       *m_node.chainman, *m_node.mempool,
+                                       /*avalanche=*/nullptr, false);
 
     banman->ClearBanned();
     CAddress addr1(ip(0xa0b0c001), NODE_NONE);
     CNode dummyNode1(id++, INVALID_SOCKET, addr1,
                      /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0,
                      /* nLocalExtraEntropyIn */ 0, CAddress(), /* pszDest */ "",
                      ConnectionType::INBOUND, /* inbound_onion */ false);
     dummyNode1.SetCommonVersion(PROTOCOL_VERSION);
     peerLogic->InitializeNode(config, dummyNode1, NODE_NETWORK);
     dummyNode1.fSuccessfullyConnected = true;
     // Should be discouraged
     peerLogic->UnitTestMisbehaving(dummyNode1.GetId(),
                                    DISCOURAGEMENT_THRESHOLD);
     BOOST_CHECK(peerLogic->SendMessages(config, &dummyNode1));
     BOOST_CHECK(banman->IsDiscouraged(addr1));
     // Different IP, not discouraged
     BOOST_CHECK(!banman->IsDiscouraged(ip(0xa0b0c001 | 0x0000ff00)));
 
     CAddress addr2(ip(0xa0b0c002), NODE_NONE);
     CNode dummyNode2(id++, INVALID_SOCKET, addr2,
                      /* nKeyedNetGroupIn */ 1, /* nLocalHostNonceIn */ 1,
                      /* nLocalExtraEntropyIn */ 1, CAddress(),
                      /* pszDest */ "", ConnectionType::INBOUND,
                      /* inbound_onion */ false);
     dummyNode2.SetCommonVersion(PROTOCOL_VERSION);
     peerLogic->InitializeNode(config, dummyNode2, NODE_NETWORK);
     dummyNode2.fSuccessfullyConnected = true;
     peerLogic->UnitTestMisbehaving(dummyNode2.GetId(),
                                    DISCOURAGEMENT_THRESHOLD - 1);
     BOOST_CHECK(peerLogic->SendMessages(config, &dummyNode2));
     // 2 not discouraged yet...
     BOOST_CHECK(!banman->IsDiscouraged(addr2));
     // ... but 1 still should be
     BOOST_CHECK(banman->IsDiscouraged(addr1));
     // 2 reaches discouragement threshold
     peerLogic->UnitTestMisbehaving(dummyNode2.GetId(), 1);
     BOOST_CHECK(peerLogic->SendMessages(config, &dummyNode2));
     BOOST_CHECK(banman->IsDiscouraged(addr1)); // Expect both 1 and 2
     BOOST_CHECK(banman->IsDiscouraged(addr2)); // to be discouraged now
 
     peerLogic->FinalizeNode(config, dummyNode1);
     peerLogic->FinalizeNode(config, dummyNode2);
 }
 
 BOOST_AUTO_TEST_CASE(DoS_bantime) {
     LOCK(NetEventsInterface::g_msgproc_mutex);
 
     const Config &config = m_node.chainman->GetConfig();
 
     auto banman = std::make_unique<BanMan>(
         m_args.GetDataDirBase() / "banlist.dat", config.GetChainParams(),
         nullptr, DEFAULT_MISBEHAVING_BANTIME);
     auto connman =
         std::make_unique<CConnman>(config, 0x1337, 0x1337, *m_node.addrman);
-    auto peerLogic =
-        PeerManager::make(*connman, *m_node.addrman, banman.get(),
-                          *m_node.chainman, *m_node.mempool, false);
+    auto peerLogic = PeerManager::make(*connman, *m_node.addrman, banman.get(),
+                                       *m_node.chainman, *m_node.mempool,
+                                       /*avalanche=*/nullptr, false);
 
     banman->ClearBanned();
     int64_t nStartTime = GetTime();
     // Overrides future calls to GetTime()
     SetMockTime(nStartTime);
 
     CAddress addr(ip(0xa0b0c001), NODE_NONE);
     CNode dummyNode(id++, INVALID_SOCKET, addr,
                     /* nKeyedNetGroupIn */ 4, /* nLocalHostNonceIn */ 4,
                     /* nLocalExtraEntropyIn */ 4, CAddress(), /* pszDest */ "",
                     ConnectionType::INBOUND, /* inbound_onion */ false);
     dummyNode.SetCommonVersion(PROTOCOL_VERSION);
     peerLogic->InitializeNode(config, dummyNode, NODE_NETWORK);
     dummyNode.fSuccessfullyConnected = true;
 
     peerLogic->UnitTestMisbehaving(dummyNode.GetId(), DISCOURAGEMENT_THRESHOLD);
     BOOST_CHECK(peerLogic->SendMessages(config, &dummyNode));
     BOOST_CHECK(banman->IsDiscouraged(addr));
 
     peerLogic->FinalizeNode(config, dummyNode);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 4f31198340..aac65c3f54 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -1,624 +1,624 @@
 // Copyright (c) 2011-2019 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <test/util/setup_common.h>
 
 #include <kernel/mempool_entry.h>
 #include <kernel/validation_cache_sizes.h>
 
 #include <addrman.h>
 #include <banman.h>
 #include <chainparams.h>
 #include <common/system.h>
 #include <config.h>
 #include <consensus/consensus.h>
 #include <consensus/merkle.h>
 #include <consensus/validation.h>
 #include <crypto/sha256.h>
 #include <init.h>
 #include <interfaces/chain.h>
 #include <logging.h>
 #include <mempool_args.h>
 #include <net.h>
 #include <net_processing.h>
 #include <node/blockstorage.h>
 #include <node/chainstate.h>
 #include <node/chainstatemanager_args.h>
 #include <node/context.h>
 #include <node/kernel_notifications.h>
 #include <node/miner.h>
 #include <node/validation_cache_args.h>
 #include <noui.h>
 #include <pow/pow.h>
 #include <random.h>
 #include <rpc/blockchain.h>
 #include <rpc/register.h>
 #include <rpc/server.h>
 #include <scheduler.h>
 #include <script/script_error.h>
 #include <script/scriptcache.h>
 #include <script/sigcache.h>
 #include <shutdown.h>
 #include <streams.h>
 #include <timedata.h>
 #include <txdb.h>
 #include <txmempool.h>
 #include <util/strencodings.h>
 #include <util/thread.h>
 #include <util/threadnames.h>
 #include <util/time.h>
 #include <util/translation.h>
 #include <util/vector.h>
 #include <validation.h>
 #include <validationinterface.h>
 #include <walletinitinterface.h>
 
 #include <test/util/mining.h>
 #include <test/util/random.h>
 
 #include <algorithm>
 #include <functional>
 #include <memory>
 
 using kernel::ValidationCacheSizes;
 using node::ApplyArgsManOptions;
 using node::BlockAssembler;
 using node::BlockManager;
 using node::CalculateCacheSizes;
 using node::fReindex;
 using node::KernelNotifications;
 using node::LoadChainstate;
 using node::NodeContext;
 using node::VerifyLoadedChainstate;
 
 const std::function<std::string(const char *)> G_TRANSLATION_FUN = nullptr;
 
 /**
  * Random context to get unique temp data dirs. Separate from
  * g_insecure_rand_ctx, which can be seeded from a const env var
  */
 static FastRandomContext g_insecure_rand_ctx_temp_path;
 
 std::ostream &operator<<(std::ostream &os, const uint256 &num) {
     os << num.ToString();
     return os;
 }
 
 std::ostream &operator<<(std::ostream &os, const ScriptError &err) {
     os << ScriptErrorString(err);
     return os;
 }
 
 std::vector<const char *> fixture_extra_args{};
 
 BasicTestingSetup::BasicTestingSetup(
     const std::string &chainName, const std::vector<const char *> &extra_args)
     : m_path_root{fsbridge::GetTempDirectoryPath() /
                   "test_common_" PACKAGE_NAME /
                   g_insecure_rand_ctx_temp_path.rand256().ToString()},
       m_args{} {
     // clang-format off
     std::vector<const char *> arguments = Cat(
         {
             "dummy",
             "-printtoconsole=0",
             "-logsourcelocations",
             "-logtimemicros",
             "-debug",
             "-debugexclude=libevent",
             "-debugexclude=leveldb",
         },
         extra_args);
     // clang-format on
     arguments = Cat(arguments, fixture_extra_args);
     auto &config = const_cast<Config &>(GetConfig());
     SetMockTime(0);
     fs::create_directories(m_path_root);
     m_args.ForceSetArg("-datadir", fs::PathToString(m_path_root));
     gArgs.ForceSetArg("-datadir", fs::PathToString(m_path_root));
     gArgs.ClearPathCache();
     {
         SetupServerArgs(m_node);
         std::string error;
         const bool success{m_node.args->ParseParameters(
             arguments.size(), arguments.data(), error)};
         assert(success);
         assert(error.empty());
     }
     SelectParams(chainName);
     SeedInsecureRand();
     InitLogging(*m_node.args);
     AppInitParameterInteraction(config, *m_node.args);
     LogInstance().StartLogging();
     SHA256AutoDetect();
     ECC_Start();
     SetupEnvironment();
     SetupNetworking();
 
     ValidationCacheSizes validation_cache_sizes{};
     ApplyArgsManOptions(*m_node.args, validation_cache_sizes);
     Assert(InitSignatureCache(validation_cache_sizes.signature_cache_bytes));
     Assert(InitScriptExecutionCache(
         validation_cache_sizes.script_execution_cache_bytes));
 
     m_node.chain = interfaces::MakeChain(m_node, config.GetChainParams());
     g_wallet_init_interface.Construct(m_node);
 
     static bool noui_connected = false;
     if (!noui_connected) {
         noui_connect();
         noui_connected = true;
     }
 }
 
 BasicTestingSetup::~BasicTestingSetup() {
     LogInstance().DisconnectTestLogger();
     fs::remove_all(m_path_root);
     gArgs.ClearArgs();
     ECC_Stop();
 }
 CTxMemPool::Options MemPoolOptionsForTest(const NodeContext &node) {
     CTxMemPool::Options mempool_opts{
         // Default to always checking mempool regardless of
         // chainparams.DefaultConsistencyChecks for tests
         .check_ratio = 1,
     };
     const auto err{ApplyArgsManOptions(
         *node.args, ::GetConfig().GetChainParams(), mempool_opts)};
     Assert(!err);
     return mempool_opts;
 }
 
 ChainTestingSetup::ChainTestingSetup(
     const std::string &chainName, const std::vector<const char *> &extra_args)
     : BasicTestingSetup(chainName, extra_args) {
     const Config &config = GetConfig();
 
     // We have to run a scheduler thread to prevent ActivateBestChain
     // from blocking due to queue overrun.
     m_node.scheduler = std::make_unique<CScheduler>();
     m_node.scheduler->m_service_thread =
         std::thread(util::TraceThread, "scheduler",
                     [&] { m_node.scheduler->serviceQueue(); });
     GetMainSignals().RegisterBackgroundSignalScheduler(*m_node.scheduler);
 
     m_node.mempool =
         std::make_unique<CTxMemPool>(MemPoolOptionsForTest(m_node));
 
     m_cache_sizes = CalculateCacheSizes(m_args);
 
     m_node.notifications = std::make_unique<KernelNotifications>();
 
     ChainstateManager::Options chainman_opts{
         .config = config,
         .datadir = m_args.GetDataDirNet(),
         .adjusted_time_callback = GetAdjustedTime,
         .check_block_index = true,
         .notifications = *m_node.notifications,
     };
     ApplyArgsManOptions(*m_node.args, chainman_opts);
     const BlockManager::Options blockman_opts{
         .chainparams = chainman_opts.config.GetChainParams(),
         .blocks_dir = m_args.GetBlocksDirPath(),
     };
     m_node.chainman =
         std::make_unique<ChainstateManager>(chainman_opts, blockman_opts);
     m_node.chainman->m_blockman.m_block_tree_db =
         std::make_unique<CBlockTreeDB>(DBParams{
             .path = m_args.GetDataDirNet() / "blocks" / "index",
             .cache_bytes = static_cast<size_t>(m_cache_sizes.block_tree_db),
             .memory_only = true});
     // Call Upgrade on the block database so that the version field is set,
     // else LoadBlockIndexGuts will fail (see D8319).
     m_node.chainman->m_blockman.m_block_tree_db->Upgrade();
 
     constexpr int script_check_threads = 2;
     StartScriptCheckWorkerThreads(script_check_threads);
 }
 
 ChainTestingSetup::~ChainTestingSetup() {
     if (m_node.scheduler) {
         m_node.scheduler->stop();
     }
     StopScriptCheckWorkerThreads();
     GetMainSignals().FlushBackgroundCallbacks();
     GetMainSignals().UnregisterBackgroundSignalScheduler();
     m_node.connman.reset();
     m_node.banman.reset();
     m_node.addrman.reset();
     m_node.args = nullptr;
     m_node.mempool.reset();
     m_node.scheduler.reset();
     m_node.chainman.reset();
 }
 
 void TestingSetup::LoadVerifyActivateChainstate() {
     auto &chainman{*Assert(m_node.chainman)};
     node::ChainstateLoadOptions options;
     options.mempool = Assert(m_node.mempool.get());
     options.block_tree_db_in_memory = m_block_tree_db_in_memory;
     options.coins_db_in_memory = m_coins_db_in_memory;
     options.reindex = node::fReindex;
     options.reindex_chainstate =
         m_args.GetBoolArg("-reindex-chainstate", false);
     options.prune = chainman.m_blockman.IsPruneMode();
     options.check_blocks =
         m_args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
     options.check_level = m_args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL);
     options.require_full_verification =
         m_args.IsArgSet("-checkblocks") || m_args.IsArgSet("-checklevel");
     auto [status, error] = LoadChainstate(chainman, m_cache_sizes, options);
     assert(status == node::ChainstateLoadStatus::SUCCESS);
 
     std::tie(status, error) = VerifyLoadedChainstate(chainman, options);
     assert(status == node::ChainstateLoadStatus::SUCCESS);
 
     BlockValidationState state;
     if (!chainman.ActiveChainstate().ActivateBestChain(state)) {
         throw std::runtime_error(
             strprintf("ActivateBestChain failed. (%s)", state.ToString()));
     }
 }
 
 TestingSetup::TestingSetup(const std::string &chainName,
                            const std::vector<const char *> &extra_args,
                            const bool coins_db_in_memory,
                            const bool block_tree_db_in_memory)
     : ChainTestingSetup(chainName, extra_args),
       m_coins_db_in_memory(coins_db_in_memory),
       m_block_tree_db_in_memory(block_tree_db_in_memory) {
     const Config &config = GetConfig();
 
     // Ideally we'd move all the RPC tests to the functional testing framework
     // instead of unit tests, but for now we need these here.
     RPCServer rpcServer;
     RegisterAllRPCCommands(config, rpcServer, tableRPC);
 
     /**
      * RPC does not come out of the warmup state on its own. Normally, this is
      * handled in bitcoind's init path, but unit tests do not trigger this
      * codepath, so we call it explicitly as part of setup.
      */
     std::string rpcWarmupStatus;
     if (RPCIsInWarmup(&rpcWarmupStatus)) {
         SetRPCWarmupFinished();
     }
 
     LoadVerifyActivateChainstate();
 
     m_node.addrman = std::make_unique<AddrMan>(
         /* asmap= */ std::vector<bool>(), /* consistency_check_ratio= */ 0);
     m_node.banman = std::make_unique<BanMan>(
         m_args.GetDataDirBase() / "banlist.dat", config.GetChainParams(),
         nullptr, DEFAULT_MISBEHAVING_BANTIME);
     // Deterministic randomness for tests.
     m_node.connman =
         std::make_unique<CConnman>(config, 0x1337, 0x1337, *m_node.addrman);
-    m_node.peerman =
-        PeerManager::make(*m_node.connman, *m_node.addrman, m_node.banman.get(),
-                          *m_node.chainman, *m_node.mempool, false);
+    m_node.peerman = PeerManager::make(
+        *m_node.connman, *m_node.addrman, m_node.banman.get(), *m_node.chainman,
+        *m_node.mempool, /*avalanche=*/nullptr, false);
     {
         CConnman::Options options;
         options.m_msgproc.push_back(m_node.peerman.get());
         m_node.connman->Init(options);
     }
 }
 
 TestChain100Setup::TestChain100Setup(
     const std::string &chain_name, const std::vector<const char *> &extra_args,
     const bool coins_db_in_memory, const bool block_tree_db_in_memory)
     : TestingSetup{CBaseChainParams::REGTEST, extra_args, coins_db_in_memory,
                    block_tree_db_in_memory} {
     SetMockTime(1598887952);
     constexpr std::array<uint8_t, 32> vchKey = {
         {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
          0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}};
     coinbaseKey.Set(vchKey.begin(), vchKey.end(), true);
 
     // Generate a 100-block chain:
     this->mineBlocks(COINBASE_MATURITY);
 
     {
         LOCK(::cs_main);
         assert(
             m_node.chainman->ActiveTip()->GetBlockHash().ToString() ==
             "5afde277a26b6f36aee8f61a1dbf755587e1c6be63e654a88abe2a1ff0fbfb05");
     }
 }
 
 void TestChain100Setup::mineBlocks(int num_blocks) {
     CScript scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey())
                                      << OP_CHECKSIG;
     for (int i = 0; i < num_blocks; i++) {
         std::vector<CMutableTransaction> noTxns;
         CBlock b = CreateAndProcessBlock(noTxns, scriptPubKey);
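         // Advance mocktime so that each mined block gets a strictly
         // increasing timestamp.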
         SetMockTime(GetTime() + 1);
         m_coinbase_txns.push_back(b.vtx[0]);
     }
 }
 
 CBlock
 TestChain100Setup::CreateBlock(const std::vector<CMutableTransaction> &txns,
                                const CScript &scriptPubKey,
                                Chainstate &chainstate) {
     const Config &config = GetConfig();
     CBlock block = BlockAssembler{config, chainstate, nullptr}
                        .CreateNewBlock(scriptPubKey)
                        ->block;
 
     Assert(block.vtx.size() == 1);
     for (const CMutableTransaction &tx : txns) {
         block.vtx.push_back(MakeTransactionRef(tx));
     }
 
     // Order transactions by canonical order
     std::sort(std::begin(block.vtx) + 1, std::end(block.vtx),
               [](const std::shared_ptr<const CTransaction> &txa,
                  const std::shared_ptr<const CTransaction> &txb) -> bool {
                   return txa->GetId() < txb->GetId();
               });
 
     createCoinbaseAndMerkleRoot(&block,
                                 WITH_LOCK(m_node.chainman->GetMutex(),
                                           return m_node.chainman->ActiveTip()),
                                 config.GetMaxBlockSize());
 
     const Consensus::Params &params = config.GetChainParams().GetConsensus();
     while (!CheckProofOfWork(block.GetHash(), block.nBits, params)) {
         ++block.nNonce;
     }
 
     return block;
 }
 
 CBlock TestChain100Setup::CreateAndProcessBlock(
     const std::vector<CMutableTransaction> &txns, const CScript &scriptPubKey,
     Chainstate *chainstate) {
     if (!chainstate) {
         chainstate = &Assert(m_node.chainman)->ActiveChainstate();
     }
 
     const CBlock block = this->CreateBlock(txns, scriptPubKey, *chainstate);
     std::shared_ptr<const CBlock> shared_pblock =
         std::make_shared<const CBlock>(block);
     Assert(m_node.chainman)
         ->ProcessNewBlock(shared_pblock, true, true, nullptr);
 
     return block;
 }
 
 CMutableTransaction TestChain100Setup::CreateValidMempoolTransaction(
     CTransactionRef input_transaction, int input_vout, int input_height,
     CKey input_signing_key, CScript output_destination, Amount output_amount,
     bool submit) {
     // Transaction we will submit to the mempool
     CMutableTransaction mempool_txn;
 
     // Create an input
     COutPoint outpoint_to_spend(input_transaction->GetId(), input_vout);
     CTxIn input(outpoint_to_spend);
     mempool_txn.vin.push_back(input);
 
     // Create an output
     CTxOut output(output_amount, output_destination);
     mempool_txn.vout.push_back(output);
 
     // Sign the transaction
     // - Add the signing key to a keystore
     FillableSigningProvider keystore;
     keystore.AddKey(input_signing_key);
     // - Populate a CoinsViewCache with the unspent output
     CCoinsView coins_view;
     CCoinsViewCache coins_cache(&coins_view);
     AddCoins(coins_cache, *input_transaction.get(), input_height);
     // - Use GetCoin to properly populate utxo_to_spend,
     Coin utxo_to_spend;
     assert(coins_cache.GetCoin(outpoint_to_spend, utxo_to_spend));
     // - Then add it to a map to pass in to SignTransaction
     std::map<COutPoint, Coin> input_coins;
     input_coins.insert({outpoint_to_spend, utxo_to_spend});
     // - Default signature hashing type
     SigHashType nHashType = SigHashType().withForkId();
     std::map<int, std::string> input_errors;
     assert(SignTransaction(mempool_txn, &keystore, input_coins, nHashType,
                            input_errors));
 
     // If submit=true, add transaction to the mempool.
     if (submit) {
         LOCK(cs_main);
         const MempoolAcceptResult result = m_node.chainman->ProcessTransaction(
             MakeTransactionRef(mempool_txn));
         assert(result.m_result_type == MempoolAcceptResult::ResultType::VALID);
     }
 
     return mempool_txn;
 }
 
 TestChain100Setup::~TestChain100Setup() {
     SetMockTime(0);
 }
 
 std::vector<CTransactionRef>
 TestChain100Setup::PopulateMempool(FastRandomContext &det_rand,
                                    size_t num_transactions, bool submit) {
     std::vector<CTransactionRef> mempool_transactions;
     std::deque<std::pair<COutPoint, Amount>> unspent_prevouts;
     std::transform(m_coinbase_txns.begin(), m_coinbase_txns.end(),
                    std::back_inserter(unspent_prevouts), [](const auto &tx) {
                        return std::make_pair(COutPoint(tx->GetId(), 0),
                                              tx->vout[0].nValue);
                    });
     while (num_transactions > 0 && !unspent_prevouts.empty()) {
         // The number of inputs and the number of outputs are random, each
         // between 1 and 24.
         CMutableTransaction mtx = CMutableTransaction();
         const size_t num_inputs = det_rand.randrange(24) + 1;
         Amount total_in{Amount::zero()};
         for (size_t n{0}; n < num_inputs; ++n) {
             if (unspent_prevouts.empty()) {
                 break;
             }
             const auto &[prevout, amount] = unspent_prevouts.front();
             mtx.vin.push_back(CTxIn(prevout, CScript()));
             total_in += amount;
             unspent_prevouts.pop_front();
         }
         const size_t num_outputs = det_rand.randrange(24) + 1;
         // Approximately 1000 sat "fee"; equal output amounts.
         const Amount amount_per_output =
             (total_in - 1000 * SATOSHI) / int(num_outputs);
         for (size_t n{0}; n < num_outputs; ++n) {
             CScript spk = CScript() << CScriptNum(num_transactions + n);
             mtx.vout.push_back(CTxOut(amount_per_output, spk));
         }
         CTransactionRef ptx = MakeTransactionRef(mtx);
         mempool_transactions.push_back(ptx);
         if (amount_per_output > 2000 * SATOSHI) {
             // If the value is high enough to fund another transaction + fees,
             // keep track of it so it can be used to build a more complex
             // transaction graph. Insert randomly into unspent_prevouts for
             // extra randomness in the resulting structures.
             for (size_t n{0}; n < num_outputs; ++n) {
                 unspent_prevouts.push_back(std::make_pair(
                     COutPoint(ptx->GetId(), n), amount_per_output));
                 std::swap(unspent_prevouts.back(),
                           unspent_prevouts[det_rand.randrange(
                               unspent_prevouts.size())]);
             }
         }
         if (submit) {
             LOCK2(m_node.mempool->cs, cs_main);
             LockPoints lp;
             m_node.mempool->addUnchecked(
                 CTxMemPoolEntryRef::make(ptx, 1000 * SATOSHI, 0, 1, 4, lp));
         }
         --num_transactions;
     }
     return mempool_transactions;
 }
 
 CTxMemPoolEntryRef
 TestMemPoolEntryHelper::FromTx(const CMutableTransaction &tx) const {
     return FromTx(MakeTransactionRef(tx));
 }
 
 CTxMemPoolEntryRef
 TestMemPoolEntryHelper::FromTx(const CTransactionRef &tx) const {
     CTxMemPoolEntry ret(tx, nFee, nTime, nHeight, nSigChecks, LockPoints());
     ret.SetEntryId(entryId);
     return CTxMemPoolEntryRef::make(std::move(ret));
 }
 
 /**
  * @returns a real block
  * (0000000000013b8ab2cd513b0261a14096412195a72a0c4827d229dcc7e0f7af) with 9
  * txs.
  */
 CBlock getBlock13b8a() {
     CBlock block;
     CDataStream stream(
         ParseHex(
             "0100000090f0a9f110702f808219ebea1173056042a714bad51b916cb680000000"
             "0000005275289558f51c9966699404ae2294730c3c9f9bda53523ce50e9b95e558"
             "da2fdb261b4d4c86041b1ab1bf9309010000000100000000000000000000000000"
             "00000000000000000000000000000000000000ffffffff07044c86041b0146ffff"
             "ffff0100f2052a01000000434104e18f7afbe4721580e81e8414fc8c24d7cfacf2"
             "54bb5c7b949450c3e997c2dc1242487a8169507b631eb3771f2b425483fb13102c"
             "4eb5d858eef260fe70fbfae0ac00000000010000000196608ccbafa16abada9027"
             "80da4dc35dafd7af05fa0da08cf833575f8cf9e836000000004a493046022100da"
             "b24889213caf43ae6adc41cf1c9396c08240c199f5225acf45416330fd7dbd0221"
             "00fe37900e0644bf574493a07fc5edba06dbc07c311b947520c2d514bc5725dcb4"
             "01ffffffff0100f2052a010000001976a914f15d1921f52e4007b146dfa60f369e"
             "d2fc393ce288ac000000000100000001fb766c1288458c2bafcfec81e48b24d98e"
             "c706de6b8af7c4e3c29419bfacb56d000000008c493046022100f268ba165ce0ad"
             "2e6d93f089cfcd3785de5c963bb5ea6b8c1b23f1ce3e517b9f022100da7c0f21ad"
             "c6c401887f2bfd1922f11d76159cbc597fbd756a23dcbb00f4d7290141042b4e86"
             "25a96127826915a5b109852636ad0da753c9e1d5606a50480cd0c40f1f8b8d8982"
             "35e571fe9357d9ec842bc4bba1827daaf4de06d71844d0057707966affffffff02"
             "80969800000000001976a9146963907531db72d0ed1a0cfb471ccb63923446f388"
             "ac80d6e34c000000001976a914f0688ba1c0d1ce182c7af6741e02658c7d4dfcd3"
             "88ac000000000100000002c40297f730dd7b5a99567eb8d27b78758f607507c522"
             "92d02d4031895b52f2ff010000008b483045022100f7edfd4b0aac404e5bab4fd3"
             "889e0c6c41aa8d0e6fa122316f68eddd0a65013902205b09cc8b2d56e1cd1f7f2f"
             "afd60a129ed94504c4ac7bdc67b56fe67512658b3e014104732012cb962afa90d3"
             "1b25d8fb0e32c94e513ab7a17805c14ca4c3423e18b4fb5d0e676841733cb83aba"
             "f975845c9f6f2a8097b7d04f4908b18368d6fc2d68ecffffffffca5065ff9617cb"
             "cba45eb23726df6498a9b9cafed4f54cbab9d227b0035ddefb000000008a473044"
             "022068010362a13c7f9919fa832b2dee4e788f61f6f5d344a7c2a0da6ae7406056"
             "58022006d1af525b9a14a35c003b78b72bd59738cd676f845d1ff3fc25049e0100"
             "3614014104732012cb962afa90d31b25d8fb0e32c94e513ab7a17805c14ca4c342"
             "3e18b4fb5d0e676841733cb83abaf975845c9f6f2a8097b7d04f4908b18368d6fc"
             "2d68ecffffffff01001ec4110200000043410469ab4181eceb28985b9b4e895c13"
             "fa5e68d85761b7eee311db5addef76fa8621865134a221bd01f28ec9999ee3e021"
             "e60766e9d1f3458c115fb28650605f11c9ac000000000100000001cdaf2f758e91"
             "c514655e2dc50633d1e4c84989f8aa90a0dbc883f0d23ed5c2fa010000008b4830"
             "4502207ab51be6f12a1962ba0aaaf24a20e0b69b27a94fac5adf45aa7d2d18ffd9"
             "236102210086ae728b370e5329eead9accd880d0cb070aea0c96255fae6c4f1ddc"
             "ce1fd56e014104462e76fd4067b3a0aa42070082dcb0bf2f388b6495cf33d78990"
             "4f07d0f55c40fbd4b82963c69b3dc31895d0c772c812b1d5fbcade15312ef1c0e8"
             "ebbb12dcd4ffffffff02404b4c00000000001976a9142b6ba7c9d796b75eef7942"
             "fc9288edd37c32f5c388ac002d3101000000001976a9141befba0cdc1ad5652937"
             "1864d9f6cb042faa06b588ac000000000100000001b4a47603e71b61bc3326efd9"
             "0111bf02d2f549b067f4c4a8fa183b57a0f800cb010000008a4730440220177c37"
             "f9a505c3f1a1f0ce2da777c339bd8339ffa02c7cb41f0a5804f473c9230220585b"
             "25a2ee80eb59292e52b987dad92acb0c64eced92ed9ee105ad153cdb12d0014104"
             "43bd44f683467e549dae7d20d1d79cbdb6df985c6e9c029c8d0c6cb46cc1a4d3cf"
             "7923c5021b27f7a0b562ada113bc85d5fda5a1b41e87fe6e8802817cf69996ffff"
             "ffff0280651406000000001976a9145505614859643ab7b547cd7f1f5e7e2a1232"
             "2d3788ac00aa0271000000001976a914ea4720a7a52fc166c55ff2298e07baf70a"
             "e67e1b88ac00000000010000000586c62cd602d219bb60edb14a3e204de0705176"
             "f9022fe49a538054fb14abb49e010000008c493046022100f2bc2aba2534becbdf"
             "062eb993853a42bbbc282083d0daf9b4b585bd401aa8c9022100b1d7fd7ee0b956"
             "00db8535bbf331b19eed8d961f7a8e54159c53675d5f69df8c014104462e76fd40"
             "67b3a0aa42070082dcb0bf2f388b6495cf33d789904f07d0f55c40fbd4b82963c6"
             "9b3dc31895d0c772c812b1d5fbcade15312ef1c0e8ebbb12dcd4ffffffff03ad0e"
             "58ccdac3df9dc28a218bcf6f1997b0a93306faaa4b3a28ae83447b217901000000"
             "8b483045022100be12b2937179da88599e27bb31c3525097a07cdb52422d165b3c"
             "a2f2020ffcf702200971b51f853a53d644ebae9ec8f3512e442b1bcb6c315a5b49"
             "1d119d10624c83014104462e76fd4067b3a0aa42070082dcb0bf2f388b6495cf33"
             "d789904f07d0f55c40fbd4b82963c69b3dc31895d0c772c812b1d5fbcade15312e"
             "f1c0e8ebbb12dcd4ffffffff2acfcab629bbc8685792603762c921580030ba144a"
             "f553d271716a95089e107b010000008b483045022100fa579a840ac258871365dd"
             "48cd7552f96c8eea69bd00d84f05b283a0dab311e102207e3c0ee9234814cfbb1b"
             "659b83671618f45abc1326b9edcc77d552a4f2a805c0014104462e76fd4067b3a0"
             "aa42070082dcb0bf2f388b6495cf33d789904f07d0f55c40fbd4b82963c69b3dc3"
             "1895d0c772c812b1d5fbcade15312ef1c0e8ebbb12dcd4ffffffffdcdc6023bbc9"
             "944a658ddc588e61eacb737ddf0a3cd24f113b5a8634c517fcd2000000008b4830"
             "450221008d6df731df5d32267954bd7d2dda2302b74c6c2a6aa5c0ca64ecbabc1a"
             "f03c75022010e55c571d65da7701ae2da1956c442df81bbf076cdbac25133f99d9"
             "8a9ed34c014104462e76fd4067b3a0aa42070082dcb0bf2f388b6495cf33d78990"
             "4f07d0f55c40fbd4b82963c69b3dc31895d0c772c812b1d5fbcade15312ef1c0e8"
             "ebbb12dcd4ffffffffe15557cd5ce258f479dfd6dc6514edf6d7ed5b21fcfa4a03"
             "8fd69f06b83ac76e010000008b483045022023b3e0ab071eb11de2eb1cc3a67261"
             "b866f86bf6867d4558165f7c8c8aca2d86022100dc6e1f53a91de3efe8f6351285"
             "0811f26284b62f850c70ca73ed5de8771fb451014104462e76fd4067b3a0aa4207"
             "0082dcb0bf2f388b6495cf33d789904f07d0f55c40fbd4b82963c69b3dc31895d0"
             "c772c812b1d5fbcade15312ef1c0e8ebbb12dcd4ffffffff01404b4c0000000000"
             "1976a9142b6ba7c9d796b75eef7942fc9288edd37c32f5c388ac00000000010000"
             "000166d7577163c932b4f9690ca6a80b6e4eb001f0a2fa9023df5595602aae96ed"
             "8d000000008a4730440220262b42546302dfb654a229cefc86432b89628ff259dc"
             "87edd1154535b16a67e102207b4634c020a97c3e7bbd0d4d19da6aa2269ad9dded"
             "4026e896b213d73ca4b63f014104979b82d02226b3a4597523845754d44f13639e"
             "3bf2df5e82c6aab2bdc79687368b01b1ab8b19875ae3c90d661a3d0a33161dab29"
             "934edeb36aa01976be3baf8affffffff02404b4c00000000001976a9144854e695"
             "a02af0aeacb823ccbc272134561e0a1688ac40420f00000000001976a914abee93"
             "376d6b37b5c2940655a6fcaf1c8e74237988ac0000000001000000014e3f8ef2e9"
             "1349a9059cb4f01e54ab2597c1387161d3da89919f7ea6acdbb371010000008c49"
             "304602210081f3183471a5ca22307c0800226f3ef9c353069e0773ac76bb580654"
             "d56aa523022100d4c56465bdc069060846f4fbf2f6b20520b2a80b08b168b31e66"
             "ddb9c694e240014104976c79848e18251612f8940875b2b08d06e6dc73b9840e88"
             "60c066b7e87432c477e9a59a453e71e6d76d5fe34058b800a098fc1740ce3012e8"
             "fc8a00c96af966ffffffff02c0e1e400000000001976a9144134e75a6fcb604203"
             "4aab5e18570cf1f844f54788ac404b4c00000000001976a9142b6ba7c9d796b75e"
             "ef7942fc9288edd37c32f5c388ac00000000"),
         SER_NETWORK, PROTOCOL_VERSION);
     stream >> block;
     return block;
 }
 
 DummyConfig::DummyConfig()
     : chainParams(CreateChainParams(ArgsManager{}, CBaseChainParams::REGTEST)) {
 }
 
 DummyConfig::DummyConfig(std::string net)
     : chainParams(CreateChainParams(ArgsManager{}, net)) {}