diff --git a/src/init.cpp b/src/init.cpp
index d3541fb75..265d1562c 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1,2779 +1,2777 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2018 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #if defined(HAVE_CONFIG_H)
 #include <config/bitcoin-config.h>
 #endif
 
 #include <init.h>
 
 #include <addrman.h>
 #include <amount.h>
 #include <avalanche.h>
 #include <banman.h>
 #include <blockfilter.h>
 #include <chain.h>
 #include <chainparams.h>
 #include <checkpoints.h>
 #include <compat/sanity.h>
 #include <config.h>
 #include <consensus/validation.h>
 #include <flatfile.h>
 #include <fs.h>
 #include <httprpc.h>
 #include <httpserver.h>
 #include <index/blockfilterindex.h>
 #include <index/txindex.h>
 #include <interfaces/chain.h>
 #include <key.h>
 #include <miner.h>
 #include <net.h>
 #include <net_permissions.h>
 #include <net_processing.h>
 #include <netbase.h>
 #include <node/context.h>
 #include <policy/mempool.h>
 #include <policy/policy.h>
 #include <policy/settings.h>
 #include <rpc/blockchain.h>
 #include <rpc/register.h>
 #include <rpc/server.h>
 #include <rpc/util.h>
 #include <scheduler.h>
 #include <script/scriptcache.h>
 #include <script/sigcache.h>
 #include <script/standard.h>
 #include <shutdown.h>
 #include <timedata.h>
 #include <torcontrol.h>
 #include <txdb.h>
 #include <txmempool.h>
 #include <ui_interface.h>
 #include <util/moneystr.h>
 #include <util/system.h>
 #include <util/threadnames.h>
 #include <util/translation.h>
 #include <util/validation.h>
 #include <validation.h>
 #include <validationinterface.h>
 #include <walletinitinterface.h>
 
 #include <boost/algorithm/string/classification.hpp>
 #include <boost/algorithm/string/replace.hpp>
 #include <boost/algorithm/string/split.hpp>
 #include <boost/thread.hpp>
 
 #if ENABLE_ZMQ
 #include <zmq/zmqnotificationinterface.h>
 #include <zmq/zmqrpc.h>
 #endif
 
 #ifndef WIN32
 #include <attributes.h>
 #include <cerrno>
 #include <csignal>
 #include <sys/stat.h>
 #endif
 #include <cstdint>
 #include <cstdio>
 #include <memory>
 
 static const bool DEFAULT_PROXYRANDOMIZE = true;
 static const bool DEFAULT_REST_ENABLE = false;
 static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false;
 
 // Dump banned addresses to banlist.dat every 15 minutes (900s)
 static constexpr int DUMP_BANS_INTERVAL = 60 * 15;
 
 #ifdef WIN32
 // Win32 LevelDB doesn't use file descriptors, and the ones used for accessing
 // block files don't count towards the fd_set size limit anyway.
 #define MIN_CORE_FILEDESCRIPTORS 0
 #else
 #define MIN_CORE_FILEDESCRIPTORS 150
 #endif
 
 /**
  * The PID file facilities.
  */
 static const char *BITCOIN_PID_FILENAME = "bitcoind.pid";
 
 static fs::path GetPidFile() {
     return AbsPathForConfigVal(
         fs::path(gArgs.GetArg("-pid", BITCOIN_PID_FILENAME)));
 }
 
 NODISCARD static bool CreatePidFile() {
     fsbridge::ofstream file{GetPidFile()};
     if (file) {
 #ifdef WIN32
         tfm::format(file, "%d\n", GetCurrentProcessId());
 #else
         tfm::format(file, "%d\n", getpid());
 #endif
         return true;
     } else {
         return InitError(
             strprintf(_("Unable to create the PID file '%s': %s").translated,
                       GetPidFile().string(), std::strerror(errno)));
     }
 }
 
 //////////////////////////////////////////////////////////////////////////////
 //
 // Shutdown
 //
 
 //
 // Thread management and startup/shutdown:
 //
 // The network-processing threads are all part of a thread group created by
 // AppInit() or the Qt main() function.
 //
 // A clean exit happens when StartShutdown() or the SIGTERM signal handler sets
 // ShutdownRequested(), which triggers DetectShutdownThread(), which interrupts
 // the main thread group. DetectShutdownThread() then exits, which causes
 // AppInit() to continue (it joins the shutdown thread). Shutdown() is then
 // called to clean up database connections and stop other threads that should
 // only be stopped after the main network-processing threads have exited.
 //
 // Shutdown for Qt is very similar, only it uses a QTimer to detect
 // ShutdownRequested() getting set, and then does the normal Qt shutdown thing.
 //
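 
 // A minimal sketch of the shutdown-detection loop described above. The real
 // loop lives outside this file (bitcoind's main function, or a QTimer for the
 // GUI); the helper name, its NodeContext parameter and the 200ms poll interval
 // are illustrative assumptions, not part of this file's API.
 #if 0
 static void WaitForShutdownSketch(NodeContext &node) {
     while (!ShutdownRequested()) {
         MilliSleep(200);
     }
     // Once shutdown has been requested, interrupt the long-running subsystems
     // so the caller can proceed to Shutdown(node).
     Interrupt(node);
 }
 #endif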
 
 /**
  * This is a minimally invasive approach to shutdown on LevelDB read errors
  * from the chainstate, while keeping the user interface out of the common
  * library, which is shared between bitcoind, bitcoin-qt and non-server tools.
  */
 class CCoinsViewErrorCatcher final : public CCoinsViewBacked {
 public:
     explicit CCoinsViewErrorCatcher(CCoinsView *view)
         : CCoinsViewBacked(view) {}
     bool GetCoin(const COutPoint &outpoint, Coin &coin) const override {
         try {
             return CCoinsViewBacked::GetCoin(outpoint, coin);
         } catch (const std::runtime_error &e) {
             uiInterface.ThreadSafeMessageBox(
                 _("Error reading from database, shutting down.").translated, "",
                 CClientUIInterface::MSG_ERROR);
             LogPrintf("Error reading from database: %s\n", e.what());
              // Starting the shutdown sequence and returning false to the
              // caller would be interpreted as 'entry not found' (as opposed
              // to being unable to read data), and could lead to an invalid
              // interpretation. Just exit immediately, as we can't continue
              // anyway, and all writes should be atomic.
             abort();
         }
     }
     // Writes do not need similar protection, as failure to write is handled by
     // the caller.
 };
 
 static std::unique_ptr<CCoinsViewErrorCatcher> pcoinscatcher;
 static std::unique_ptr<ECCVerifyHandle> globalVerifyHandle;
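 
 // How the error catcher is meant to be layered: it wraps the on-disk coins
 // view so that a LevelDB read error surfaces as a controlled abort instead of
 // being mistaken for "coin not found". A minimal sketch, assuming the wiring
 // that AppInitMain performs later in this file; the helper name and the
 // constructor arguments below are placeholders.
 #if 0
 static void WireCoinsViewsSketch(size_t nCoinDBCache) {
     pcoinsdbview.reset(
         new CCoinsViewDB(nCoinDBCache, /* fMemory */ false, /* fWipe */ false));
     pcoinscatcher.reset(new CCoinsViewErrorCatcher(pcoinsdbview.get()));
     // The in-memory cache sits on top of the catcher, so every backing read
     // goes through the error handling above.
     pcoinsTip.reset(new CCoinsViewCache(pcoinscatcher.get()));
 }
 #endif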
 
 static boost::thread_group threadGroup;
 static CScheduler scheduler;
 
 void Interrupt(NodeContext &node) {
     InterruptHTTPServer();
     InterruptHTTPRPC();
     InterruptRPC();
     InterruptREST();
     InterruptTorControl();
     InterruptMapPort();
     if (g_avalanche) {
         // Avalanche needs to be stopped before we interrupt the thread group,
         // as the scheduler will stop working once the group is interrupted.
         g_avalanche->stopEventLoop();
     }
     if (node.connman) {
         node.connman->Interrupt();
     }
     if (g_txindex) {
         g_txindex->Interrupt();
     }
     ForEachBlockFilterIndex([](BlockFilterIndex &index) { index.Interrupt(); });
 }
 
 void Shutdown(NodeContext &node) {
     LogPrintf("%s: In progress...\n", __func__);
     static RecursiveMutex cs_Shutdown;
     TRY_LOCK(cs_Shutdown, lockShutdown);
     if (!lockShutdown) {
         return;
     }
 
     /// Note: Shutdown() must be able to handle cases in which initialization
     /// failed part of the way, for example if the data directory was found to
     /// be locked. Be sure that anything that writes files or flushes caches
     /// only does this if the respective module was initialized.
     util::ThreadRename("shutoff");
     g_mempool.AddTransactionsUpdated(1);
 
     StopHTTPRPC();
     StopREST();
     StopRPC();
     StopHTTPServer();
     for (const auto &client : node.chain_clients) {
         client->flush();
     }
     StopMapPort();
 
     // Because avalanche and the network depend on each other, it is important
     // to shut them down in this order:
     // 1. Stop the avalanche event loop.
     // 2. Shut down network processing.
     // 3. Destroy AvalancheProcessor.
     // 4. Destroy CConnman.
     if (g_avalanche) {
         g_avalanche->stopEventLoop();
     }
 
     // Because these depend on each other, we make sure that neither can be
     // using the other before destroying them.
     if (node.peer_logic) {
         UnregisterValidationInterface(node.peer_logic.get());
     }
     if (node.connman) {
         node.connman->Stop();
     }
     if (g_txindex) {
         g_txindex->Stop();
     }
     ForEachBlockFilterIndex([](BlockFilterIndex &index) { index.Stop(); });
 
     StopTorControl();
 
     // After everything has been shut down, but before things get flushed, stop
     // the CScheduler/checkqueue threadGroup
     threadGroup.interrupt_all();
     threadGroup.join_all();
 
     // After the threads that potentially access these pointers have been
     // stopped, destruct and reset all to nullptr.
     node.peer_logic.reset();
 
     // Destroy various global instances
     g_avalanche.reset();
     node.connman.reset();
     node.banman.reset();
     g_txindex.reset();
     DestroyAllBlockFilterIndexes();
 
     if (::g_mempool.IsLoaded() &&
         gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
         DumpMempool(::g_mempool);
     }
 
     // FlushStateToDisk generates a ChainStateFlushed callback, which we should
     // avoid missing
     if (pcoinsTip != nullptr) {
         ::ChainstateActive().ForceFlushStateToDisk();
     }
 
     // After there are no more peers/RPC left to give us new data which may
     // generate CValidationInterface callbacks, flush them...
     GetMainSignals().FlushBackgroundCallbacks();
 
     // Any future callbacks will be dropped. This should absolutely be safe - if
     // missing a callback results in an unrecoverable situation, unclean
     // shutdown would too. The only reason to do the above flushes is to let the
     // wallet catch up with our current chain to avoid any strange pruning edge
     // cases and make next startup faster by avoiding rescan.
 
     {
         LOCK(cs_main);
         if (pcoinsTip != nullptr) {
             ::ChainstateActive().ForceFlushStateToDisk();
         }
         pcoinsTip.reset();
         pcoinscatcher.reset();
         pcoinsdbview.reset();
         pblocktree.reset();
     }
     for (const auto &client : node.chain_clients) {
         client->stop();
     }
 
 #if ENABLE_ZMQ
     if (g_zmq_notification_interface) {
         UnregisterValidationInterface(g_zmq_notification_interface);
         delete g_zmq_notification_interface;
         g_zmq_notification_interface = nullptr;
     }
 #endif
 
     try {
         if (!fs::remove(GetPidFile())) {
             LogPrintf("%s: Unable to remove PID file: File does not exist\n",
                       __func__);
         }
     } catch (const fs::filesystem_error &e) {
         LogPrintf("%s: Unable to remove PID file: %s\n", __func__,
                   fsbridge::get_filesystem_error_message(e));
     }
     node.chain_clients.clear();
     UnregisterAllValidationInterfaces();
     GetMainSignals().UnregisterBackgroundSignalScheduler();
-    GetMainSignals().UnregisterWithMempoolSignals(g_mempool);
     globalVerifyHandle.reset();
     ECC_Stop();
     if (node.mempool) {
         node.mempool = nullptr;
     }
     LogPrintf("%s: done\n", __func__);
 }
 
 /**
  * Signal handlers are very limited in what they are allowed to do.
  * The execution context the handler is invoked in is not guaranteed,
  * so we restrict handler operations to just touching variables:
  */
 #ifndef WIN32
 static void HandleSIGTERM(int) {
     StartShutdown();
 }
 
 static void HandleSIGHUP(int) {
     LogInstance().m_reopen_file = true;
 }
 #else
 static BOOL WINAPI consoleCtrlHandler(DWORD dwCtrlType) {
     StartShutdown();
     Sleep(INFINITE);
     return true;
 }
 #endif
 
 #ifndef WIN32
 static void registerSignalHandler(int signal, void (*handler)(int)) {
     struct sigaction sa;
     sa.sa_handler = handler;
     sigemptyset(&sa.sa_mask);
     sa.sa_flags = 0;
     sigaction(signal, &sa, NULL);
 }
 #endif
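 
 // A minimal sketch of how the handlers above are installed on non-Windows
 // platforms during basic setup. The helper name is illustrative; the exact
 // set of signals wired up here is an assumption, not a definitive list.
 #if 0
 static void InstallSignalHandlersSketch() {
     // Clean shutdown on SIGTERM/SIGINT, log file reopen on SIGHUP.
     registerSignalHandler(SIGTERM, HandleSIGTERM);
     registerSignalHandler(SIGINT, HandleSIGTERM);
     registerSignalHandler(SIGHUP, HandleSIGHUP);
     // Ignore SIGPIPE so an unexpectedly closed client does not kill the
     // daemon.
     signal(SIGPIPE, SIG_IGN);
 }
 #endif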
 
 static void OnRPCStarted() {
     uiInterface.NotifyBlockTip_connect(&RPCNotifyBlockChange);
 }
 
 static void OnRPCStopped() {
     uiInterface.NotifyBlockTip_disconnect(&RPCNotifyBlockChange);
     RPCNotifyBlockChange(false, nullptr);
     g_best_block_cv.notify_all();
     LogPrint(BCLog::RPC, "RPC stopped.\n");
 }
 
 void SetupServerArgs() {
     const auto defaultBaseParams =
         CreateBaseChainParams(CBaseChainParams::MAIN);
     const auto testnetBaseParams =
         CreateBaseChainParams(CBaseChainParams::TESTNET);
     const auto regtestBaseParams =
         CreateBaseChainParams(CBaseChainParams::REGTEST);
     const auto defaultChainParams = CreateChainParams(CBaseChainParams::MAIN);
     const auto testnetChainParams =
         CreateChainParams(CBaseChainParams::TESTNET);
     const auto regtestChainParams =
         CreateChainParams(CBaseChainParams::REGTEST);
 
     // Hidden Options
     std::vector<std::string> hidden_args = {
         "-h", "-help", "-dbcrashratio", "-forcecompactdb", "-parkdeepreorg",
         "-automaticunparking", "-replayprotectionactivationtime",
         "-enableminerfund",
         // GUI args. These will be overwritten by SetupUIArgs for the GUI
         "-allowselfsignedrootcertificates", "-choosedatadir", "-lang=<lang>",
         "-min", "-resetguisettings", "-rootcertificates=<file>", "-splash",
         "-uiplatform",
         // TODO remove after the May 2020 upgrade
         "-phononactivationtime"};
 
     // Set all of the args and their help
     // When adding new options to the categories, please keep alphabetical
     // ordering. Do not translate _(...) -help-debug options; they contain many
     // technical terms and have only a very small audience, so translating them
     // is unnecessary stress for translators.
     gArgs.AddArg("-?", "Print this help message and exit",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY,
                  OptionsCategory::OPTIONS);
     gArgs.AddArg("-alertnotify=<cmd>",
                  "Execute command when a relevant alert is received or we see "
                  "a really long fork (%s in cmd is replaced by message)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-assumevalid=<hex>",
         strprintf(
             "If this block is in the chain assume that it and its ancestors "
             "are valid and potentially skip their script verification (0 to "
             "verify all, default: %s, testnet: %s)",
             defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(),
             testnetChainParams->GetConsensus().defaultAssumeValid.GetHex()),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-blocksdir=<dir>",
                  "Specify directory to hold blocks subdirectory for *.dat "
                  "files (default: <datadir>)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-blocknotify=<cmd>",
                  "Execute command when the best block changes (%s in cmd is "
                  "replaced by block hash)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-blockreconstructionextratxn=<n>",
                  strprintf("Extra transactions to keep in memory for compact "
                            "block reconstructions (default: %u)",
                            DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-blocksonly",
         strprintf(
             "Whether to reject transactions from network peers. Transactions "
             "from the wallet or RPC are not affected. (default: %u)",
             DEFAULT_BLOCKSONLY),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-conf=<file>",
                  strprintf("Specify configuration file. Relative paths will be "
                            "prefixed by datadir location. (default: %s)",
                            BITCOIN_CONF_FILENAME),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-datadir=<dir>", "Specify data directory",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-dbbatchsize",
         strprintf("Maximum database write batch size in bytes (default: %u)",
                   nDefaultDbBatchSize),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-dbcache=<n>",
         strprintf(
             "Set database cache size in megabytes (%d to %d, default: %d)",
             nMinDbCache, nMaxDbCache, nDefaultDbCache),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-debuglogfile=<file>",
                  strprintf("Specify location of debug log file. Relative paths "
                            "will be prefixed by a net-specific datadir "
                            "location. (0 to disable; default: %s)",
                            DEFAULT_DEBUGLOGFILE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-feefilter",
                  strprintf("Tell other nodes to filter invs to us by our "
                            "mempool min fee (default: %d)",
                            DEFAULT_FEEFILTER),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::OPTIONS);
     gArgs.AddArg("-finalizationdelay=<n>",
                  strprintf("Set the minimum amount of time to wait between a "
                            "block header reception and the block finalization. "
                            "Unit is seconds (default: %d)",
                            DEFAULT_MIN_FINALIZATION_DELAY),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-includeconf=<file>",
         "Specify additional configuration file, relative to the -datadir path "
         "(only useable from configuration file, not command line)",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-maxreorgdepth=<n>",
                  strprintf("Configure at what depth blocks are considered "
                            "final (default: %d). Use -1 to disable.",
                            DEFAULT_MAX_REORG_DEPTH),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-loadblock=<file>",
                  "Imports blocks from external blk000??.dat file on startup",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-maxmempool=<n>",
                  strprintf("Keep the transaction memory pool below <n> "
                            "megabytes (default: %u)",
                            DEFAULT_MAX_MEMPOOL_SIZE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-maxorphantx=<n>",
                  strprintf("Keep at most <n> unconnectable transactions in "
                            "memory (default: %u)",
                            DEFAULT_MAX_ORPHAN_TRANSACTIONS),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-mempoolexpiry=<n>",
                  strprintf("Do not keep transactions in the mempool longer "
                            "than <n> hours (default: %u)",
                            DEFAULT_MEMPOOL_EXPIRY),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-minimumchainwork=<hex>",
         strprintf(
             "Minimum work assumed to exist on a valid chain in hex "
             "(default: %s, testnet: %s)",
             defaultChainParams->GetConsensus().nMinimumChainWork.GetHex(),
             testnetChainParams->GetConsensus().nMinimumChainWork.GetHex()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-par=<n>",
         strprintf("Set the number of script verification threads (%u to %d, 0 "
                   "= auto, <0 = leave that many cores free, default: %d)",
                   -GetNumCores(), MAX_SCRIPTCHECK_THREADS,
                   DEFAULT_SCRIPTCHECK_THREADS),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-persistmempool",
                  strprintf("Whether to save the mempool on shutdown and load "
                            "on restart (default: %u)",
                            DEFAULT_PERSIST_MEMPOOL),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-pid=<file>",
                  strprintf("Specify pid file. Relative paths will be prefixed "
                            "by a net-specific datadir location. (default: %s)",
                            BITCOIN_PID_FILENAME),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-prune=<n>",
         strprintf("Reduce storage requirements by enabling pruning (deleting) "
                   "of old blocks. This allows the pruneblockchain RPC to be "
                   "called to delete specific blocks, and enables automatic "
                   "pruning of old blocks if a target size in MiB is provided. "
                   "This mode is incompatible with -txindex and -rescan. "
                   "Warning: Reverting this setting requires re-downloading the "
                   "entire blockchain. (default: 0 = disable pruning blocks, 1 "
                   "= allow manual pruning via RPC, >=%u = automatically prune "
                   "block files to stay under the specified target size in MiB)",
                   MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024),
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-reindex-chainstate",
                  "Rebuild chain state from the currently indexed blocks. When "
                  "in pruning mode or if blocks on disk might be corrupted, use "
                  "full -reindex instead.",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg(
         "-reindex",
         "Rebuild chain state and block index from the blk*.dat files on disk",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #ifndef WIN32
     gArgs.AddArg(
         "-sysperms",
         "Create new files with system default permissions, instead of umask "
         "077 (only effective with disabled wallet functionality)",
         ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #else
     hidden_args.emplace_back("-sysperms");
 #endif
     gArgs.AddArg("-txindex",
                  strprintf("Maintain a full transaction index, used by the "
                            "getrawtransaction rpc call (default: %d)",
                            DEFAULT_TXINDEX),
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-blockfilterindex=<type>",
                  strprintf("Maintain an index of compact filters by block "
                            "(default: %s, values: %s).",
                            DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) +
                      " If <type> is not supplied or if <type> = 1, indexes for "
                      "all known types are enabled.",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     gArgs.AddArg("-usecashaddr",
                  "Use Cash Address for destination encoding instead of base58 "
                  "(activate by default on Jan, 14)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 
     gArgs.AddArg("-addnode=<ip>",
                  "Add a node to connect to and attempt to keep the connection "
                  "open (see the `addnode` RPC command help for more info)",
                  ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                  OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-banscore=<n>",
         strprintf("Threshold for disconnecting misbehaving peers (default: %u)",
                   DEFAULT_BANSCORE_THRESHOLD),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-bantime=<n>",
                  strprintf("Number of seconds to keep misbehaving peers from "
                            "reconnecting (default: %u)",
                            DEFAULT_MISBEHAVING_BANTIME),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-bind=<addr>",
                  "Bind to given address and always listen on it. Use "
                  "[host]:port notation for IPv6",
                  ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                  OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-connect=<ip>",
         "Connect only to the specified node(s); -connect=0 disables automatic "
         "connections (the rules for this peer are the same as for -addnode)",
         ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
         OptionsCategory::CONNECTION);
     gArgs.AddArg("-discover",
                  "Discover own IP addresses (default: 1 when listening and no "
                  "-externalip or -proxy)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-dns",
                  strprintf("Allow DNS lookups for -addnode, -seednode and "
                            "-connect (default: %d)",
                            DEFAULT_NAME_LOOKUP),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-dnsseed",
                  "Query for peer addresses via DNS lookup, if low on addresses "
                  "(default: 1 unless -connect used)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-enablebip61",
                  strprintf("Send reject messages per BIP61 (default: %u)",
                            DEFAULT_ENABLE_BIP61),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 
     gArgs.AddArg("-externalip=<ip>", "Specify your own public address",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-forcednsseed",
         strprintf(
             "Always query for peer addresses via DNS lookup (default: %d)",
             DEFAULT_FORCEDNSSEED),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-listen",
         "Accept connections from outside (default: 1 if no -proxy or -connect)",
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-listenonion",
         strprintf("Automatically create Tor hidden service (default: %d)",
                   DEFAULT_LISTEN_ONION),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-maxconnections=<n>",
         strprintf("Maintain at most <n> connections to peers (default: %u)",
                   DEFAULT_MAX_PEER_CONNECTIONS),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-maxreceivebuffer=<n>",
                  strprintf("Maximum per-connection receive buffer, <n>*1000 "
                            "bytes (default: %u)",
                            DEFAULT_MAXRECEIVEBUFFER),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-maxsendbuffer=<n>",
         strprintf(
             "Maximum per-connection send buffer, <n>*1000 bytes (default: %u)",
             DEFAULT_MAXSENDBUFFER),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-maxtimeadjustment",
         strprintf("Maximum allowed median peer time offset adjustment. Local "
                   "perspective of time may be influenced by peers forward or "
                   "backward by this amount. (default: %u seconds)",
                   DEFAULT_MAX_TIME_ADJUSTMENT),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-onion=<ip:port>",
                  strprintf("Use separate SOCKS5 proxy to reach peers via Tor "
                            "hidden services (default: %s)",
                            "-proxy"),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-onlynet=<net>",
                  "Only connect to nodes in network <net> (ipv4, ipv6 or onion)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-peerbloomfilters",
                  strprintf("Support filtering of blocks and transaction with "
                            "bloom filters (default: %d)",
                            DEFAULT_PEERBLOOMFILTERS),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-permitbaremultisig",
                  strprintf("Relay non-P2SH multisig (default: %d)",
                            DEFAULT_PERMIT_BAREMULTISIG),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-port=<port>",
                  strprintf("Listen for connections on <port> (default: %u, "
                            "testnet: %u, regtest: %u)",
                            defaultChainParams->GetDefaultPort(),
                            testnetChainParams->GetDefaultPort(),
                            regtestChainParams->GetDefaultPort()),
                  ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                  OptionsCategory::CONNECTION);
     gArgs.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-proxyrandomize",
                  strprintf("Randomize credentials for every proxy connection. "
                            "This enables Tor stream isolation (default: %d)",
                            DEFAULT_PROXYRANDOMIZE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-seednode=<ip>",
                  "Connect to a node to retrieve peer addresses, and disconnect",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-timeout=<n>",
                  strprintf("Specify connection timeout in milliseconds "
                            "(minimum: 1, default: %d)",
                            DEFAULT_CONNECT_TIMEOUT),
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-peertimeout=<n>",
         strprintf("Specify p2p connection timeout in seconds. This option "
                   "determines the amount of time a peer may be inactive before "
                   "the connection to it is dropped. (minimum: 1, default: %d)",
                   DEFAULT_PEER_CONNECT_TIMEOUT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-torcontrol=<ip>:<port>",
         strprintf(
             "Tor control port to use if onion listening enabled (default: %s)",
             DEFAULT_TOR_CONTROL),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-torpassword=<pass>",
                  "Tor control port password (default: empty)",
                  ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                  OptionsCategory::CONNECTION);
 #ifdef USE_UPNP
 #if USE_UPNP
     gArgs.AddArg("-upnp",
                  "Use UPnP to map the listening port (default: 1 when "
                  "listening and no -proxy)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 #else
     gArgs.AddArg(
         "-upnp",
         strprintf("Use UPnP to map the listening port (default: %u)", 0),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 #endif
 #else
     hidden_args.emplace_back("-upnp");
 #endif
     gArgs.AddArg("-whitebind=<addr>",
                  "Bind to given address and whitelist peers connecting to it. "
                  "Use [host]:port notation for IPv6",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg("-whitelist=<IP address or network>",
                  "Whitelist peers connecting from the given IP address (e.g. "
                  "1.2.3.4) or CIDR notated network (e.g. 1.2.3.0/24). Can be "
                  "specified multiple times. "
                  "Whitelisted peers cannot be DoS banned and their "
                  "transactions are always relayed, even if they are already in "
                  "the mempool, useful e.g. for a gateway",
                  ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
     gArgs.AddArg(
         "-maxuploadtarget=<n>",
         strprintf("Tries to keep outbound traffic under the given target (in "
                   "MiB per 24h), 0 = no limit (default: %d)",
                   DEFAULT_MAX_UPLOAD_TARGET),
         ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
 
     g_wallet_init_interface.AddWalletOptions();
 
 #if ENABLE_ZMQ
     gArgs.AddArg("-zmqpubhashblock=<address>",
                  "Enable publish hash block in <address>",
                  ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubhashtx=<address>",
                  "Enable publish hash transaction in <address>",
                  ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubrawblock=<address>",
                  "Enable publish raw block in <address>",
                  ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
     gArgs.AddArg("-zmqpubrawtx=<address>",
                  "Enable publish raw transaction in <address>",
                  ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
 #else
     hidden_args.emplace_back("-zmqpubhashblock=<address>");
     hidden_args.emplace_back("-zmqpubhashtx=<address>");
     hidden_args.emplace_back("-zmqpubrawblock=<address>");
     hidden_args.emplace_back("-zmqpubrawtx=<address>");
 #endif
 
     gArgs.AddArg(
         "-checkblocks=<n>",
         strprintf("How many blocks to check at startup (default: %u, 0 = all)",
                   DEFAULT_CHECKBLOCKS),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-checklevel=<n>",
         strprintf("How thorough the block verification of "
                   "-checkblocks is: "
                   "level 0 reads the blocks from disk, "
                   "level 1 verifies block validity, "
                   "level 2 verifies undo data, "
                   "level 3 checks disconnection of tip blocks, "
                   "and level 4 tries to reconnect the blocks. "
                   "Each level includes the checks of the previous levels "
                   "(0-4, default: %u)",
                   DEFAULT_CHECKLEVEL),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-checkblockindex",
         strprintf("Do a full consistency check for mapBlockIndex, "
                   "setBlockIndexCandidates, ::ChainActive() and "
                   "mapBlocksUnlinked occasionally. (default: %u, regtest: %u)",
                   defaultChainParams->DefaultConsistencyChecks(),
                   regtestChainParams->DefaultConsistencyChecks()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-checkmempool=<n>",
         strprintf(
             "Run checks every <n> transactions (default: %u, regtest: %u)",
             defaultChainParams->DefaultConsistencyChecks(),
             regtestChainParams->DefaultConsistencyChecks()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-checkpoints",
                  strprintf("Only accept block chain matching built-in "
                            "checkpoints (default: %d)",
                            DEFAULT_CHECKPOINTS_ENABLED),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-deprecatedrpc=<method>",
                  "Allows deprecated RPC method(s) to be used",
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-dropmessagestest=<n>",
                  "Randomly drop 1 of every <n> network messages",
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-stopafterblockimport",
         strprintf("Stop running after importing blocks from disk (default: %d)",
                   DEFAULT_STOPAFTERBLOCKIMPORT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-stopatheight",
                  strprintf("Stop running after reaching the given height in "
                            "the main chain (default: %u)",
                            DEFAULT_STOPATHEIGHT),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-limitancestorcount=<n>",
                  strprintf("Do not accept transactions if number of in-mempool "
                            "ancestors is <n> or more (pre-phonon-upgrade "
                            "default: %u, post-phonon-upgrade default: %u)",
                            DEFAULT_ANCESTOR_LIMIT,
                            DEFAULT_ANCESTOR_LIMIT_LONGER),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-limitancestorsize=<n>",
         strprintf("Do not accept transactions whose size with all in-mempool "
                   "ancestors exceeds <n> kilobytes (default: %u)",
                   DEFAULT_ANCESTOR_SIZE_LIMIT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-limitdescendantcount=<n>",
         strprintf("Do not accept transactions if any ancestor would have <n> "
                   "or more in-mempool descendants (default pre-phonon-upgrade: "
                   "%u, default post-phonon-upgrade: %u)",
                   DEFAULT_DESCENDANT_LIMIT, DEFAULT_DESCENDANT_LIMIT_LONGER),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-limitdescendantsize=<n>",
         strprintf("Do not accept transactions if any ancestor would have more "
                   "than <n> kilobytes of in-mempool descendants (default: %u).",
                   DEFAULT_DESCENDANT_SIZE_LIMIT),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-addrmantest", "Allows to test address relay on localhost",
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
 
     gArgs.AddArg("-debug=<category>",
                  strprintf("Output debugging information (default: %u, "
                            "supplying <category> is optional)",
                            0) +
                      ". " +
                      "If <category> is not supplied or if <category> = 1, "
                      "output all debugging information."
                      "<category> can be: " +
                      ListLogCategories() + ".",
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-debugexclude=<category>",
         strprintf("Exclude debugging information for a category. Can be used "
                   "in conjunction with -debug=1 to output debug logs for all "
                   "categories except one or more specified categories."),
         ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-help-debug",
                  "Print help message with debugging options and exit",
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-logips",
                  strprintf("Include IP addresses in debug output (default: %d)",
                            DEFAULT_LOGIPS),
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-logtimestamps",
                  strprintf("Prepend debug output with timestamp (default: %d)",
                            DEFAULT_LOGTIMESTAMPS),
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-logthreadnames",
         strprintf(
             "Prepend debug output with name of the originating thread (only "
             "available on platforms supporting thread_local) (default: %u)",
             DEFAULT_LOGTHREADNAMES),
         ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-logtimemicros",
         strprintf("Add microsecond precision to debug timestamps (default: %d)",
                   DEFAULT_LOGTIMEMICROS),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-mocktime=<n>",
         "Replace actual time with <n> seconds since epoch (default: 0)",
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-maxsigcachesize=<n>",
         strprintf("Limit size of signature cache to <n> MiB (default: %u)",
                   DEFAULT_MAX_SIG_CACHE_SIZE),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-maxscriptcachesize=<n>",
         strprintf("Limit size of script cache to <n> MiB (default: %u)",
                   DEFAULT_MAX_SCRIPT_CACHE_SIZE),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-maxtipage=<n>",
                  strprintf("Maximum tip age in seconds to consider node in "
                            "initial block download (default: %u)",
                            DEFAULT_MAX_TIP_AGE),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
 
     gArgs.AddArg(
         "-printtoconsole",
         "Send trace/debug info to console instead of debug.log file (default: "
         "1 when no -daemon. To disable logging to file, set debuglogfile=0)",
         ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
     gArgs.AddArg("-printpriority",
                  strprintf("Log transaction priority and fee per kB when "
                            "mining blocks (default: %d)",
                            DEFAULT_PRINTPRIORITY),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::DEBUG_TEST);
     gArgs.AddArg(
         "-shrinkdebugfile",
         "Shrink debug.log file on client startup (default: 1 when no -debug)",
         ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
 
     gArgs.AddArg("-uacomment=<cmt>", "Append comment to the user agent string",
                  ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
 
     SetupChainParamsBaseOptions();
 
     gArgs.AddArg(
         "-acceptnonstdtxn",
         strprintf(
             "Relay and mine \"non-standard\" transactions (%sdefault: %u)",
             "testnet/regtest only; ", defaultChainParams->RequireStandard()),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::NODE_RELAY);
     gArgs.AddArg("-excessiveblocksize=<n>",
                  strprintf("Do not accept blocks larger than this limit, in "
                            "bytes (default: %d)",
                            DEFAULT_MAX_BLOCK_SIZE),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-dustrelayfee=<amt>",
         strprintf("Fee rate (in %s/kB) used to defined dust, the value of an "
                   "output such that it will cost about 1/3 of its value in "
                   "fees at this fee rate to spend it. (default: %s)",
                   CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE)),
         ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
         OptionsCategory::NODE_RELAY);
 
     gArgs.AddArg("-bytespersigop",
                  strprintf("Equivalent bytes per sigop in transactions for "
                            "relay and mining (default: %u)",
                            DEFAULT_BYTES_PER_SIGOP),
                  ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-datacarrier",
         strprintf("Relay and mine data carrier transactions (default: %d)",
                   DEFAULT_ACCEPT_DATACARRIER),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg("-datacarriersize",
                  strprintf("Maximum size of data in data carrier transactions "
                            "we relay and mine (default: %u)",
                            MAX_OP_RETURN_RELAY),
                  ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-minrelaytxfee=<amt>",
         strprintf("Fees (in %s/kB) smaller than this are rejected for "
                   "relaying, mining and transaction creation (default: %s)",
                   CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE_PER_KB)),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-whitelistrelay",
         strprintf("Accept relayed transactions received from whitelisted "
                   "peers even when not relaying transactions (default: %d)",
                   DEFAULT_WHITELISTRELAY),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
     gArgs.AddArg(
         "-whitelistforcerelay",
         strprintf("Force relay of transactions from whitelisted peers even if "
                   "they violate local relay policy (default: %d)",
                   DEFAULT_WHITELISTFORCERELAY),
         ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY);
 
     // Not sure this really belongs here, but it will do for now.
     // FIXME: This doesn't work anyway.
     gArgs.AddArg("-excessutxocharge=<amt>",
                  strprintf("Fees (in %s/kB) to charge per utxo created for "
                            "relaying, and mining (default: %s)",
                            CURRENCY_UNIT, FormatMoney(DEFAULT_UTXO_FEE)),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::NODE_RELAY);
 
     gArgs.AddArg("-blockmaxsize=<n>",
                  strprintf("Set maximum block size in bytes (default: %d)",
                            DEFAULT_MAX_GENERATED_BLOCK_SIZE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
     gArgs.AddArg("-blockmintxfee=<amt>",
                  strprintf("Set lowest fee rate (in %s/kB) for transactions to "
                            "be included in block creation. (default: %s)",
                            CURRENCY_UNIT,
                            FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE_PER_KB)),
                  ArgsManager::ALLOW_ANY, OptionsCategory::BLOCK_CREATION);
 
     gArgs.AddArg("-blockversion=<n>",
                  "Override block version to test forking scenarios",
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::BLOCK_CREATION);
 
     gArgs.AddArg("-server", "Accept command line and JSON-RPC commands",
                  ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg("-rest",
                  strprintf("Accept public REST requests (default: %d)",
                            DEFAULT_REST_ENABLE),
                  ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcbind=<addr>[:port]",
         "Bind to given address to listen for JSON-RPC connections. Do not "
         "expose the RPC server to untrusted networks such as the public "
         "internet! This option is ignored unless -rpcallowip is also passed. "
         "Port is optional and overrides -rpcport.  Use [host]:port notation "
         "for IPv6. This option can be specified multiple times (default: "
         "127.0.0.1 and ::1 i.e., localhost)",
         ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY |
             ArgsManager::SENSITIVE,
         OptionsCategory::RPC);
     gArgs.AddArg("-rpccookiefile=<loc>",
                  "Location of the auth cookie. Relative paths will be prefixed "
                  "by a net-specific datadir location. (default: data dir)",
                  ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections",
                  ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                  OptionsCategory::RPC);
     gArgs.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections",
                  ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE,
                  OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcwhitelist=<whitelist>",
         "Set a whitelist to filter incoming RPC calls for a specific user. The "
         "field <whitelist> comes in the format: <USERNAME>:<rpc 1>,<rpc "
         "2>,...,<rpc n>. If multiple whitelists are set for a given user, they "
         "are set-intersected. See -rpcwhitelistdefault documentation for "
         "information on default whitelist behavior.",
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcwhitelistdefault",
         "Sets default behavior for rpc whitelisting. Unless "
         "rpcwhitelistdefault is set to 0, if any -rpcwhitelist is set, the rpc "
         "server acts as if all rpc users are subject to "
         "empty-unless-otherwise-specified whitelists. If rpcwhitelistdefault "
         "is set to 1 and no -rpcwhitelist is set, rpc server acts as if all "
         "rpc users are subject to empty whitelists.",
         ArgsManager::ALLOW_BOOL, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcauth=<userpw>",
         "Username and hashed password for JSON-RPC connections. The field "
         "<userpw> comes in the format: <USERNAME>:<SALT>$<HASH>. A canonical "
         "python script is included in share/rpcauth. The client then connects "
         "normally using the rpcuser=<USERNAME>/rpcpassword=<PASSWORD> pair of "
         "arguments. This option can be specified multiple times",
         ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
     gArgs.AddArg("-rpcport=<port>",
                  strprintf("Listen for JSON-RPC connections on <port> "
                            "(default: %u, testnet: %u, regtest: %u)",
                            defaultBaseParams->RPCPort(),
                            testnetBaseParams->RPCPort(),
                            regtestBaseParams->RPCPort()),
                  ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY,
                  OptionsCategory::RPC);
     gArgs.AddArg("-rpcallowip=<ip>",
                  "Allow JSON-RPC connections from specified source. Valid for "
                  "<ip> are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. "
                  "1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). "
                  "This option can be specified multiple times",
                  ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpcthreads=<n>",
         strprintf(
             "Set the number of threads to service RPC calls (default: %d)",
             DEFAULT_HTTP_THREADS),
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
     gArgs.AddArg(
         "-rpccorsdomain=value",
         "Domain from which to accept cross origin requests (browser enforced)",
         ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
 
     gArgs.AddArg("-rpcworkqueue=<n>",
                  strprintf("Set the depth of the work queue to service RPC "
                            "calls (default: %d)",
                            DEFAULT_HTTP_WORKQUEUE),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::RPC);
     gArgs.AddArg("-rpcservertimeout=<n>",
                  strprintf("Timeout during HTTP requests (default: %d)",
                            DEFAULT_HTTP_SERVER_TIMEOUT),
                  ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY,
                  OptionsCategory::RPC);
 
 #if HAVE_DECL_DAEMON
     gArgs.AddArg("-daemon",
                  "Run in the background as a daemon and accept commands",
                  ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 #else
     hidden_args.emplace_back("-daemon");
 #endif
 
     // Avalanche options.
     gArgs.AddArg(
         "-enableavalanche",
         strprintf("Enable avalanche (default: %u)", AVALANCHE_DEFAULT_ENABLED),
         ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
     gArgs.AddArg(
         "-avacooldown",
         strprintf("Mandatory cooldown between two avapoll (default: %u)",
                   AVALANCHE_DEFAULT_COOLDOWN),
         ArgsManager::ALLOW_ANY, OptionsCategory::AVALANCHE);
 
     // Add the hidden options
     gArgs.AddHiddenArgs(hidden_args);
 }
 
 std::string LicenseInfo() {
     const std::string URL_SOURCE_CODE =
         "<https://github.com/Bitcoin-ABC/bitcoin-abc>";
     const std::string URL_WEBSITE = "<https://www.bitcoinabc.org>";
 
     return CopyrightHolders(strprintf(_("Copyright (C) %i-%i").translated, 2009,
                                       COPYRIGHT_YEAR) +
                             " ") +
            "\n" + "\n" +
            strprintf(_("Please contribute if you find %s useful. "
                        "Visit %s for further information about the software.")
                          .translated,
                      PACKAGE_NAME, URL_WEBSITE) +
            "\n" +
            strprintf(_("The source code is available from %s.").translated,
                      URL_SOURCE_CODE) +
            "\n" + "\n" + _("This is experimental software.").translated + "\n" +
            strprintf(_("Distributed under the MIT software license, see the "
                        "accompanying file %s or %s")
                          .translated,
                      "COPYING", "<https://opensource.org/licenses/MIT>") +
            "\n" + "\n" +
            strprintf(_("This product includes software developed by the "
                        "OpenSSL Project for use in the OpenSSL Toolkit %s and "
                        "cryptographic software written by Eric Young and UPnP "
                        "software written by Thomas Bernard.")
                          .translated,
                      "<https://www.openssl.org>") +
            "\n";
 }
 
 static void BlockNotifyCallback(bool initialSync,
                                 const CBlockIndex *pBlockIndex) {
     if (initialSync || !pBlockIndex) {
         return;
     }
 
     std::string strCmd = gArgs.GetArg("-blocknotify", "");
     if (!strCmd.empty()) {
         boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex());
         std::thread t(runCommand, strCmd);
         // thread runs free
         t.detach();
     }
 }
 
 static bool fHaveGenesis = false;
 static Mutex g_genesis_wait_mutex;
 static std::condition_variable g_genesis_wait_cv;
 
 static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex) {
     if (pBlockIndex != nullptr) {
         {
             LOCK(g_genesis_wait_mutex);
             fHaveGenesis = true;
         }
         g_genesis_wait_cv.notify_all();
     }
 }
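 
 // The consumer side of this notification, sketched for clarity: init code
 // later connects BlockNotifyGenesisWait to the block-tip signal and then
 // blocks until the genesis block has been processed or a shutdown is
 // requested. The helper name and the 500ms wake-up interval are illustrative
 // assumptions.
 #if 0
 static void WaitForGenesisSketch() {
     WAIT_LOCK(g_genesis_wait_mutex, lock);
     while (!fHaveGenesis && !ShutdownRequested()) {
         // Wake up periodically so a shutdown request is not missed while
         // waiting for the genesis block notification.
         g_genesis_wait_cv.wait_for(lock, std::chrono::milliseconds(500));
     }
 }
 #endif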
 
 struct CImportingNow {
     CImportingNow() {
         assert(fImporting == false);
         fImporting = true;
     }
 
     ~CImportingNow() {
         assert(fImporting == true);
         fImporting = false;
     }
 };
 
 // If we're using -prune with -reindex, then delete block files that will be
 // ignored by the reindex.  Since reindexing works by starting at block file 0
 // and looping until a block file is missing, do the same here to delete any
 // later block files after a gap. Also delete all rev files since they'll be
 // rewritten by the reindex anyway. This ensures that vinfoBlockFile is in sync
 // with what's actually on disk by the time we start downloading, so that
 // pruning works correctly.
 static void CleanupBlockRevFiles() {
     std::map<std::string, fs::path> mapBlockFiles;
 
     // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
     // Remove the rev files immediately and insert the blk file paths into an
     // ordered map keyed by block file index.
     LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for "
               "-reindex with -prune\n");
     const auto directoryIterator = fs::directory_iterator{GetBlocksDir()};
     for (const auto &file : directoryIterator) {
         const auto fileName = file.path().filename().string();
         if (fs::is_regular_file(file) && fileName.length() == 12 &&
             fileName.substr(8, 4) == ".dat") {
             if (fileName.substr(0, 3) == "blk") {
                 mapBlockFiles[fileName.substr(3, 5)] = file.path();
             } else if (fileName.substr(0, 3) == "rev") {
                 remove(file.path());
             }
         }
     }
 
     // Remove all block files that aren't part of a contiguous set starting at
     // zero by walking the ordered map (keys are block file indices) while
     // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist),
     // start removing block files.
     int contiguousCounter = 0;
     for (const auto &item : mapBlockFiles) {
         if (atoi(item.first) == contiguousCounter) {
             contiguousCounter++;
             continue;
         }
         remove(item.second);
     }
 }
 
 static void ThreadImport(const Config &config,
                          std::vector<fs::path> vImportFiles) {
     util::ThreadRename("loadblk");
     ScheduleBatchPriority();
 
     {
         const CChainParams &chainParams = config.GetChainParams();
 
         CImportingNow imp;
 
         // -reindex
         if (fReindex) {
             int nFile = 0;
             while (true) {
                 FlatFilePos pos(nFile, 0);
                 if (!fs::exists(GetBlockPosFilename(pos))) {
                     // No block files left to reindex
                     break;
                 }
                 FILE *file = OpenBlockFile(pos, true);
                 if (!file) {
                     // This error is logged in OpenBlockFile
                     break;
                 }
                 LogPrintf("Reindexing block file blk%05u.dat...\n",
                           (unsigned int)nFile);
                 LoadExternalBlockFile(config, file, &pos);
                 nFile++;
             }
             pblocktree->WriteReindexing(false);
             fReindex = false;
             LogPrintf("Reindexing finished\n");
             // To avoid ending up in a situation without a genesis block,
             // retry initialization (a no-op if reindexing worked):
             LoadGenesisBlock(chainParams);
         }
 
         // hardcoded $DATADIR/bootstrap.dat
         fs::path pathBootstrap = GetDataDir() / "bootstrap.dat";
         if (fs::exists(pathBootstrap)) {
             FILE *file = fsbridge::fopen(pathBootstrap, "rb");
             if (file) {
                 fs::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old";
                 LogPrintf("Importing bootstrap.dat...\n");
                 LoadExternalBlockFile(config, file);
                 RenameOver(pathBootstrap, pathBootstrapOld);
             } else {
                 LogPrintf("Warning: Could not open bootstrap file %s\n",
                           pathBootstrap.string());
             }
         }
 
         // -loadblock=
         for (const fs::path &path : vImportFiles) {
             FILE *file = fsbridge::fopen(path, "rb");
             if (file) {
                 LogPrintf("Importing blocks file %s...\n", path.string());
                 LoadExternalBlockFile(config, file);
             } else {
                 LogPrintf("Warning: Could not open blocks file %s\n",
                           path.string());
             }
         }
 
         // Reconsider blocks we know are valid. They may have been marked
         // invalid by, for instance, running an outdated version of the node
         // software.
         const MapCheckpoints &checkpoints =
             chainParams.Checkpoints().mapCheckpoints;
         for (const MapCheckpoints::value_type &i : checkpoints) {
             const BlockHash &hash = i.second;
 
             LOCK(cs_main);
             CBlockIndex *pblockindex = LookupBlockIndex(hash);
             if (pblockindex && !pblockindex->nStatus.isValid()) {
                 LogPrintf("Reconsidering checkpointed block %s ...\n",
                           hash.GetHex());
                 ResetBlockFailureFlags(pblockindex);
             }
         }
 
         // Scan for better chains in the block chain database that are not yet
         // connected in the active best chain.
         CValidationState state;
         if (!ActivateBestChain(config, state)) {
             LogPrintf("Failed to connect best block (%s)\n",
                       FormatStateMessage(state));
             StartShutdown();
             return;
         }
 
         if (gArgs.GetBoolArg("-stopafterblockimport",
                              DEFAULT_STOPAFTERBLOCKIMPORT)) {
             LogPrintf("Stopping after block import\n");
             StartShutdown();
             return;
         }
     } // End scope of CImportingNow
     if (gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) {
         LoadMempool(config, ::g_mempool);
     }
     ::g_mempool.SetIsLoaded(!ShutdownRequested());
 }
 
 /** Sanity checks
  *  Ensure that Bitcoin is running in a usable environment with all
  *  necessary library support.
  */
 static bool InitSanityCheck() {
     if (!ECC_InitSanityCheck()) {
         InitError(
             "Elliptic curve cryptography sanity check failure. Aborting.");
         return false;
     }
 
     if (!glibcxx_sanity_test()) {
         return false;
     }
 
     if (!Random_SanityCheck()) {
         InitError("OS cryptographic RNG sanity check failure. Aborting.");
         return false;
     }
 
     return true;
 }
 
 static bool AppInitServers(Config &config,
                            HTTPRPCRequestProcessor &httpRPCRequestProcessor) {
     RPCServerSignals::OnStarted(&OnRPCStarted);
     RPCServerSignals::OnStopped(&OnRPCStopped);
     if (!InitHTTPServer(config)) {
         return false;
     }
 
     StartRPC();
 
     if (!StartHTTPRPC(httpRPCRequestProcessor)) {
         return false;
     }
     if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE)) {
         StartREST();
     }
 
     StartHTTPServer();
     return true;
 }
 
 // Parameter interaction based on rules
 void InitParameterInteraction() {
     // when specifying an explicit binding address, you want to listen on it
     // even when -connect or -proxy is specified.
     if (gArgs.IsArgSet("-bind")) {
         if (gArgs.SoftSetBoolArg("-listen", true)) {
             LogPrintf(
                 "%s: parameter interaction: -bind set -> setting -listen=1\n",
                 __func__);
         }
     }
     if (gArgs.IsArgSet("-whitebind")) {
         if (gArgs.SoftSetBoolArg("-listen", true)) {
             LogPrintf("%s: parameter interaction: -whitebind set -> setting "
                       "-listen=1\n",
                       __func__);
         }
     }
 
     if (gArgs.IsArgSet("-connect")) {
         // When only connecting to trusted nodes, do not seed via DNS or
         // listen by default.
         if (gArgs.SoftSetBoolArg("-dnsseed", false)) {
             LogPrintf("%s: parameter interaction: -connect set -> setting "
                       "-dnsseed=0\n",
                       __func__);
         }
         if (gArgs.SoftSetBoolArg("-listen", false)) {
             LogPrintf("%s: parameter interaction: -connect set -> setting "
                       "-listen=0\n",
                       __func__);
         }
     }
 
     if (gArgs.IsArgSet("-proxy")) {
         // to protect privacy, do not listen by default if a default proxy
         // server is specified.
         if (gArgs.SoftSetBoolArg("-listen", false)) {
             LogPrintf(
                 "%s: parameter interaction: -proxy set -> setting -listen=0\n",
                 __func__);
         }
         // to protect privacy, do not use UPNP when a proxy is set. The user may
         // still specify -listen=1 to listen locally, so don't rely on this
         // happening through -listen below.
         if (gArgs.SoftSetBoolArg("-upnp", false)) {
             LogPrintf(
                 "%s: parameter interaction: -proxy set -> setting -upnp=0\n",
                 __func__);
         }
         // to protect privacy, do not discover addresses by default
         if (gArgs.SoftSetBoolArg("-discover", false)) {
             LogPrintf("%s: parameter interaction: -proxy set -> setting "
                       "-discover=0\n",
                       __func__);
         }
     }
 
     if (!gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
         // do not map ports or try to retrieve public IP when not listening
         // (pointless)
         if (gArgs.SoftSetBoolArg("-upnp", false)) {
             LogPrintf(
                 "%s: parameter interaction: -listen=0 -> setting -upnp=0\n",
                 __func__);
         }
         if (gArgs.SoftSetBoolArg("-discover", false)) {
             LogPrintf(
                 "%s: parameter interaction: -listen=0 -> setting -discover=0\n",
                 __func__);
         }
         if (gArgs.SoftSetBoolArg("-listenonion", false)) {
             LogPrintf("%s: parameter interaction: -listen=0 -> setting "
                       "-listenonion=0\n",
                       __func__);
         }
     }
 
     if (gArgs.IsArgSet("-externalip")) {
         // if an explicit public IP is specified, do not try to find others
         if (gArgs.SoftSetBoolArg("-discover", false)) {
             LogPrintf("%s: parameter interaction: -externalip set -> setting "
                       "-discover=0\n",
                       __func__);
         }
     }
 
     // disable whitelistrelay in blocksonly mode
     if (gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
         if (gArgs.SoftSetBoolArg("-whitelistrelay", false)) {
             LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting "
                       "-whitelistrelay=0\n",
                       __func__);
         }
     }
 
     // Forcing relay from whitelisted hosts implies we will accept relays from
     // them in the first place.
     if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
         if (gArgs.SoftSetBoolArg("-whitelistrelay", true)) {
             LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> "
                       "setting -whitelistrelay=1\n",
                       __func__);
         }
     }
 }
 
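 // Build the error message for a -<optname> address that could not be resolved.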
 static std::string ResolveErrMsg(const char *const optname,
                                  const std::string &strBind) {
     return strprintf(_("Cannot resolve -%s address: '%s'").translated, optname,
                      strBind);
 }
 
 /**
  * Initialize global loggers.
  *
  * Note that this is called very early in the process lifetime, so you should be
  * careful about what global state you rely on here.
  */
 void InitLogging() {
     LogInstance().m_print_to_file = !gArgs.IsArgNegated("-debuglogfile");
     LogInstance().m_file_path = AbsPathForConfigVal(
         gArgs.GetArg("-debuglogfile", DEFAULT_DEBUGLOGFILE));
 
     LogInstance().m_print_to_console = gArgs.GetBoolArg(
         "-printtoconsole", !gArgs.GetBoolArg("-daemon", false));
     LogInstance().m_log_timestamps =
         gArgs.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS);
     LogInstance().m_log_time_micros =
         gArgs.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS);
     LogInstance().m_log_threadnames =
         gArgs.GetBoolArg("-logthreadnames", DEFAULT_LOGTHREADNAMES);
 
     fLogIPs = gArgs.GetBoolArg("-logips", DEFAULT_LOGIPS);
 
     std::string version_string = FormatFullVersion();
 #ifdef DEBUG
     version_string += " (debug build)";
 #else
     version_string += " (release build)";
 #endif
     LogPrintf("%s version %s\n", CLIENT_NAME, version_string);
 }
 
 namespace { // Variables internal to initialization process only
 
 int nMaxConnections;
 int nUserMaxConnections;
 int nFD;
 ServiceFlags nLocalServices = ServiceFlags(NODE_NETWORK | NODE_NETWORK_LIMITED);
 int64_t peer_connect_timeout;
 std::vector<BlockFilterType> g_enabled_filter_types;
 
 } // namespace
 
 [[noreturn]] static void new_handler_terminate() {
     // Rather than throwing std::bad_alloc if allocation fails, terminate
     // immediately to (try to) avoid chain corruption. Since LogPrintf may
     // itself allocate memory, set the handler directly to terminate first.
     std::set_new_handler(std::terminate);
     LogPrintf("Error: Out of memory. Terminating.\n");
 
     // The log was successful; terminate now.
     std::terminate();
 };
 
 bool AppInitBasicSetup() {
 // Step 1: setup
 #ifdef _MSC_VER
     // Turn off Microsoft heap dump noise
     _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
     _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, nullptr,
                                              OPEN_EXISTING, 0, 0));
     // Disable confusing "helpful" text message on abort, Ctrl-C
     _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
 #endif
 #ifdef WIN32
 // Enable Data Execution Prevention (DEP)
 // Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008
 // A failure is non-critical and needs no further attention!
 #ifndef PROCESS_DEP_ENABLE
 // We define this here because GCC's winbase.h limits this to _WIN32_WINNT >=
 // 0x0601 (Windows 7), which is not correct. Can be removed when GCC's
 // winbase.h is fixed!
 #define PROCESS_DEP_ENABLE 0x00000001
 #endif
     typedef BOOL(WINAPI * PSETPROCDEPPOL)(DWORD);
     PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress(
         GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy");
     if (setProcDEPPol != nullptr) {
         setProcDEPPol(PROCESS_DEP_ENABLE);
     }
 #endif
 
     if (!SetupNetworking()) {
         return InitError("Initializing networking failed");
     }
 
 #ifndef WIN32
     if (!gArgs.GetBoolArg("-sysperms", false)) {
         umask(077);
     }
 
     // Clean shutdown on SIGTERM
     registerSignalHandler(SIGTERM, HandleSIGTERM);
     registerSignalHandler(SIGINT, HandleSIGTERM);
 
     // Reopen debug.log on SIGHUP
     registerSignalHandler(SIGHUP, HandleSIGHUP);
 
     // Ignore SIGPIPE, otherwise it will bring the daemon down if the client
     // closes unexpectedly
     signal(SIGPIPE, SIG_IGN);
 #else
     SetConsoleCtrlHandler(consoleCtrlHandler, true);
 #endif
 
     std::set_new_handler(new_handler_terminate);
 
     return true;
 }
 
 bool AppInitParameterInteraction(Config &config) {
     const CChainParams &chainparams = config.GetChainParams();
     // Step 2: parameter interactions
 
     // also see: InitParameterInteraction()
 
     // Warn if network-specific options (-addnode, -connect, etc) are
     // specified in default section of config file, but not overridden
     // on the command line or in this network's section of the config file.
     std::string network = gArgs.GetChainName();
     for (const auto &arg : gArgs.GetUnsuitableSectionOnlyArgs()) {
         return InitError(strprintf(_("Config setting for %s only applied on %s "
                                      "network when in [%s] section.")
                                        .translated,
                                    arg, network, network));
     }
 
     // Warn if unrecognized section names are present in the config file.
     for (const auto &section : gArgs.GetUnrecognizedSections()) {
         InitWarning(strprintf(
             "%s:%i " + _("Section [%s] is not recognized.").translated,
             section.m_file, section.m_line, section.m_name));
     }
 
     if (!fs::is_directory(GetBlocksDir())) {
         return InitError(strprintf(
             _("Specified blocks directory \"%s\" does not exist.").translated,
             gArgs.GetArg("-blocksdir", "")));
     }
 
     // parse and validate enabled filter types
     std::string blockfilterindex_value =
         gArgs.GetArg("-blockfilterindex", DEFAULT_BLOCKFILTERINDEX);
     if (blockfilterindex_value == "" || blockfilterindex_value == "1") {
         g_enabled_filter_types = AllBlockFilterTypes();
     } else if (blockfilterindex_value != "0") {
         const std::vector<std::string> names =
             gArgs.GetArgs("-blockfilterindex");
         g_enabled_filter_types.reserve(names.size());
         for (const auto &name : names) {
             BlockFilterType filter_type;
             if (!BlockFilterTypeByName(name, filter_type)) {
                 return InitError(strprintf(
                     _("Unknown -blockfilterindex value %s.").translated, name));
             }
             g_enabled_filter_types.push_back(filter_type);
         }
     }
 
     // if using block pruning, then disallow txindex
     if (gArgs.GetArg("-prune", 0)) {
         if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
             return InitError(
                 _("Prune mode is incompatible with -txindex.").translated);
         }
         if (!g_enabled_filter_types.empty()) {
             return InitError(
                 _("Prune mode is incompatible with -blockfilterindex.")
                     .translated);
         }
     }
 
     // -bind and -whitebind can't be set when not listening
     size_t nUserBind =
         gArgs.GetArgs("-bind").size() + gArgs.GetArgs("-whitebind").size();
     if (nUserBind != 0 && !gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) {
         return InitError(
             "Cannot set -bind or -whitebind together with -listen=0");
     }
 
     // Make sure enough file descriptors are available
     int nBind = std::max(nUserBind, size_t(1));
     nUserMaxConnections =
         gArgs.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS);
     nMaxConnections = std::max(nUserMaxConnections, 0);
 
     // Trim requested connection counts to fit into system limitations.
     nMaxConnections =
         std::max(std::min(nMaxConnections, FD_SETSIZE - nBind -
                                                MIN_CORE_FILEDESCRIPTORS -
                                                MAX_ADDNODE_CONNECTIONS),
                  0);
     nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS +
                                    MAX_ADDNODE_CONNECTIONS);
     if (nFD < MIN_CORE_FILEDESCRIPTORS) {
         return InitError(
             _("Not enough file descriptors available.").translated);
     }
     nMaxConnections =
         std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS,
                  nMaxConnections);
 
     if (nMaxConnections < nUserMaxConnections) {
         InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, "
                                 "because of system limitations.")
                                   .translated,
                               nUserMaxConnections, nMaxConnections));
     }
 
     // Step 3: parameter-to-internal-flags
     if (gArgs.IsArgSet("-debug")) {
         // Special-case: if -debug=0/-nodebug is set, turn off debugging
         // messages
         const std::vector<std::string> &categories = gArgs.GetArgs("-debug");
         if (std::none_of(
                 categories.begin(), categories.end(),
                 [](std::string cat) { return cat == "0" || cat == "none"; })) {
             for (const auto &cat : categories) {
                 if (!LogInstance().EnableCategory(cat)) {
                     InitWarning(strprintf(
                         _("Unsupported logging category %s=%s.").translated,
                         "-debug", cat));
                 }
             }
         }
     }
 
     // Now remove the logging categories which were explicitly excluded
     for (const std::string &cat : gArgs.GetArgs("-debugexclude")) {
         if (!LogInstance().DisableCategory(cat)) {
             InitWarning(
                 strprintf(_("Unsupported logging category %s=%s.").translated,
                           "-debugexclude", cat));
         }
     }
 
     // Checkmempool and checkblockindex default to true in regtest mode
     int ratio = std::min<int>(
         std::max<int>(
             gArgs.GetArg("-checkmempool",
                          chainparams.DefaultConsistencyChecks() ? 1 : 0),
             0),
         1000000);
     if (ratio != 0) {
         g_mempool.setSanityCheck(1.0 / ratio);
     }
     fCheckBlockIndex = gArgs.GetBoolArg("-checkblockindex",
                                         chainparams.DefaultConsistencyChecks());
     fCheckpointsEnabled =
         gArgs.GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED);
     if (fCheckpointsEnabled) {
         LogPrintf("Checkpoints will be verified.\n");
     } else {
         LogPrintf("Skipping checkpoint verification.\n");
     }
 
     hashAssumeValid = BlockHash::fromHex(
         gArgs.GetArg("-assumevalid",
                      chainparams.GetConsensus().defaultAssumeValid.GetHex()));
     if (!hashAssumeValid.IsNull()) {
         LogPrintf("Assuming ancestors of block %s have valid signatures.\n",
                   hashAssumeValid.GetHex());
     } else {
         LogPrintf("Validating signatures for all blocks.\n");
     }
 
     if (gArgs.IsArgSet("-minimumchainwork")) {
         const std::string minChainWorkStr =
             gArgs.GetArg("-minimumchainwork", "");
         if (!IsHexNumber(minChainWorkStr)) {
             return InitError(strprintf(
                 "Invalid non-hex (%s) minimum chain work value specified",
                 minChainWorkStr));
         }
         nMinimumChainWork = UintToArith256(uint256S(minChainWorkStr));
     } else {
         nMinimumChainWork =
             UintToArith256(chainparams.GetConsensus().nMinimumChainWork);
     }
     LogPrintf("Setting nMinimumChainWork=%s\n", nMinimumChainWork.GetHex());
     if (nMinimumChainWork <
         UintToArith256(chainparams.GetConsensus().nMinimumChainWork)) {
         LogPrintf("Warning: nMinimumChainWork set below default value of %s\n",
                   chainparams.GetConsensus().nMinimumChainWork.GetHex());
     }
 
     // mempool limits
     int64_t nMempoolSizeMax =
         gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
     int64_t nMempoolSizeMin =
         gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) *
         1000 * 40;
     if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin) {
         return InitError(
             strprintf(_("-maxmempool must be at least %d MB").translated,
                       std::ceil(nMempoolSizeMin / 1000000.0)));
     }
 
     // -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency
     nScriptCheckThreads = gArgs.GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS);
     if (nScriptCheckThreads <= 0) {
         nScriptCheckThreads += GetNumCores();
     }
     if (nScriptCheckThreads <= 1) {
         nScriptCheckThreads = 0;
     } else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS) {
         nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS;
     }
 
     // Configure excessive block size.
     const uint64_t nProposedExcessiveBlockSize =
         gArgs.GetArg("-excessiveblocksize", DEFAULT_MAX_BLOCK_SIZE);
     if (!config.SetMaxBlockSize(nProposedExcessiveBlockSize)) {
         return InitError(
             _("Excessive block size must be > 1,000,000 bytes (1MB)")
                 .translated);
     }
 
     // Check that blockmaxsize does not exceed the maximum accepted block size.
     const uint64_t nProposedMaxGeneratedBlockSize =
         gArgs.GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE);
     if (nProposedMaxGeneratedBlockSize > config.GetMaxBlockSize()) {
         auto msg = _("Max generated block size (blockmaxsize) cannot exceed "
                      "the excessive block size (excessiveblocksize)")
                        .translated;
         return InitError(msg);
     }
 
     // block pruning; get the amount of disk space (in MiB) to allot for block &
     // undo files
     int64_t nPruneArg = gArgs.GetArg("-prune", 0);
     if (nPruneArg < 0) {
         return InitError(
             _("Prune cannot be configured with a negative value.").translated);
     }
     nPruneTarget = (uint64_t)nPruneArg * 1024 * 1024;
     if (nPruneArg == 1) {
         // manual pruning: -prune=1
         LogPrintf("Block pruning enabled.  Use RPC call "
                   "pruneblockchain(height) to manually prune block and undo "
                   "files.\n");
         nPruneTarget = std::numeric_limits<uint64_t>::max();
         fPruneMode = true;
     } else if (nPruneTarget) {
         if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) {
             return InitError(
                 strprintf(_("Prune configured below the minimum of %d MiB. "
                             "Please use a higher number.")
                               .translated,
                           MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024));
         }
         LogPrintf("Prune configured to target %uMiB on disk for block and undo "
                   "files.\n",
                   nPruneTarget / 1024 / 1024);
         fPruneMode = true;
     }
 
     nConnectTimeout = gArgs.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT);
     if (nConnectTimeout <= 0) {
         nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
     }
 
     peer_connect_timeout =
         gArgs.GetArg("-peertimeout", DEFAULT_PEER_CONNECT_TIMEOUT);
     if (peer_connect_timeout <= 0) {
         return InitError(
             "peertimeout cannot be configured with a negative value.");
     }
 
     // Obtain the amount to charge for excess UTXOs.
     if (gArgs.IsArgSet("-excessutxocharge")) {
         Amount n = Amount::zero();
         auto parsed = ParseMoney(gArgs.GetArg("-excessutxocharge", ""), n);
         if (!parsed || Amount::zero() > n) {
             return InitError(AmountErrMsg(
                 "excessutxocharge", gArgs.GetArg("-excessutxocharge", "")));
         }
         config.SetExcessUTXOCharge(n);
     } else {
         config.SetExcessUTXOCharge(DEFAULT_UTXO_FEE);
     }
 
     if (gArgs.IsArgSet("-minrelaytxfee")) {
         Amount n = Amount::zero();
         auto parsed = ParseMoney(gArgs.GetArg("-minrelaytxfee", ""), n);
         if (!parsed || n == Amount::zero()) {
             return InitError(AmountErrMsg("minrelaytxfee",
                                           gArgs.GetArg("-minrelaytxfee", "")));
         }
         // High fee check is done afterward in WalletParameterInteraction()
         ::minRelayTxFee = CFeeRate(n);
     }
 
     // Sanity check argument for min fee for including tx in block
     // TODO: Harmonize which arguments need sanity checking and where that
     // happens.
     if (gArgs.IsArgSet("-blockmintxfee")) {
         Amount n = Amount::zero();
         if (!ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n)) {
             return InitError(AmountErrMsg("blockmintxfee",
                                           gArgs.GetArg("-blockmintxfee", "")));
         }
     }
 
     // Feerate used to define dust.  Shouldn't be changed lightly as old
     // implementations may inadvertently create non-standard transactions.
     if (gArgs.IsArgSet("-dustrelayfee")) {
         Amount n = Amount::zero();
         auto parsed = ParseMoney(gArgs.GetArg("-dustrelayfee", ""), n);
         if (!parsed || Amount::zero() == n) {
             return InitError(AmountErrMsg("dustrelayfee",
                                           gArgs.GetArg("-dustrelayfee", "")));
         }
         dustRelayFee = CFeeRate(n);
     }
 
     fRequireStandard =
         !gArgs.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard());
     if (!chainparams.IsTestChain() && !fRequireStandard) {
         return InitError(
             strprintf("acceptnonstdtxn is not currently supported for %s chain",
                       chainparams.NetworkIDString()));
     }
     nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp);
 
     if (!g_wallet_init_interface.ParameterInteraction()) {
         return false;
     }
 
     fIsBareMultisigStd =
         gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG);
     fAcceptDatacarrier =
         gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER);
 
     // Option to start up with mocktime set (used for regression testing):
     SetMockTime(gArgs.GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op
 
     if (gArgs.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS)) {
         nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM);
     }
 
     // Signal Bitcoin Cash support.
     // TODO: remove some time after the hardfork when no longer needed
     // to differentiate the network nodes.
     nLocalServices = ServiceFlags(nLocalServices | NODE_BITCOIN_CASH);
 
     nMaxTipAge = gArgs.GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE);
 
     return true;
 }
 
 static bool LockDataDirectory(bool probeOnly) {
     // Make sure only a single Bitcoin process is using the data directory.
     fs::path datadir = GetDataDir();
     if (!DirIsWritable(datadir)) {
         return InitError(strprintf(
             _("Cannot write to data directory '%s'; check permissions.")
                 .translated,
             datadir.string()));
     }
     if (!LockDirectory(datadir, ".lock", probeOnly)) {
         return InitError(strprintf(_("Cannot obtain a lock on data directory "
                                      "%s. %s is probably already running.")
                                        .translated,
                                    datadir.string(), PACKAGE_NAME));
     }
     return true;
 }
 
 bool AppInitSanityChecks() {
     // Step 4: sanity checks
 
     // Initialize elliptic curve code
     std::string sha256_algo = SHA256AutoDetect();
     LogPrintf("Using the '%s' SHA256 implementation\n", sha256_algo);
     RandomInit();
     ECC_Start();
     globalVerifyHandle.reset(new ECCVerifyHandle());
 
     // Sanity check
     if (!InitSanityCheck()) {
         return InitError(strprintf(
             _("Initialization sanity check failed. %s is shutting down.")
                 .translated,
             PACKAGE_NAME));
     }
 
     // Probe the data directory lock to give an early error message, if
     // possible. We cannot hold the data directory lock here, as the forking
     // for daemon() hasn't yet happened, and forking would interfere with the
     // held lock.
     return LockDataDirectory(true);
 }
 
 bool AppInitLockDataDirectory() {
     // After daemonization get the data directory lock again and hold on to it
     // until exit. This creates a slight window for a race condition to happen;
     // however, this condition is harmless: it will at most make us exit
     // without printing a message to console.
     if (!LockDataDirectory(false)) {
         // Detailed error printed inside LockDataDirectory
         return false;
     }
     return true;
 }
 
 bool AppInitMain(Config &config, RPCServer &rpcServer,
                  HTTPRPCRequestProcessor &httpRPCRequestProcessor,
                  NodeContext &node) {
     // Step 4a: application initialization
     const CChainParams &chainparams = config.GetChainParams();
 
     if (!CreatePidFile()) {
         // Detailed error printed inside CreatePidFile().
         return false;
     }
 
     BCLog::Logger &logger = LogInstance();
     if (logger.m_print_to_file) {
         if (gArgs.GetBoolArg("-shrinkdebugfile",
                              logger.DefaultShrinkDebugFile())) {
             // Do this first since it both loads a bunch of debug.log into
             // memory and needs to happen before any other debug.log printing.
             logger.ShrinkDebugFile();
         }
     }
 
     if (!logger.StartLogging()) {
         return InitError(strprintf("Could not open debug log file %s",
                                    logger.m_file_path.string()));
     }
 
     if (!logger.m_log_timestamps) {
         LogPrintf("Startup time: %s\n", FormatISO8601DateTime(GetTime()));
     }
     LogPrintf("Default data directory %s\n", GetDefaultDataDir().string());
     LogPrintf("Using data directory %s\n", GetDataDir().string());
 
     // Only log conf file usage message if conf file actually exists.
     fs::path config_file_path =
         GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME));
     if (fs::exists(config_file_path)) {
         LogPrintf("Config file: %s\n", config_file_path.string());
     } else if (gArgs.IsArgSet("-conf")) {
         // Warn if no conf file exists at path provided by user
         InitWarning(strprintf(
             _("The specified config file %s does not exist\n").translated,
             config_file_path.string()));
     } else {
         // Not categorizing as "Warning" because it's the default behavior
         LogPrintf("Config file: %s (not found, skipping)\n",
                   config_file_path.string());
     }
 
     // Log the config arguments to debug.log
     gArgs.LogArgs();
 
     LogPrintf("Using at most %i automatic connections (%i file descriptors "
               "available)\n",
               nMaxConnections, nFD);
 
     // Warn about relative -datadir path.
     if (gArgs.IsArgSet("-datadir") &&
         !fs::path(gArgs.GetArg("-datadir", "")).is_absolute()) {
         LogPrintf("Warning: relative datadir option '%s' specified, which will "
                   "be interpreted relative to the current working directory "
                   "'%s'. This is fragile, because if bitcoin is started in the "
                   "future from a different location, it will be unable to "
                   "locate the current data files. There could also be data "
                   "loss if bitcoin is started while in a temporary "
                   "directory.\n",
                   gArgs.GetArg("-datadir", ""), fs::current_path().string());
     }
 
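     // Initialize the caches that memoize signature and script-execution
     // verification results.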
     InitSignatureCache();
     InitScriptExecutionCache();
 
     LogPrintf("Using %u threads for script verification\n",
               nScriptCheckThreads);
     if (nScriptCheckThreads) {
         for (int i = 0; i < nScriptCheckThreads - 1; i++) {
             threadGroup.create_thread([i]() { return ThreadScriptCheck(i); });
         }
     }
 
     // Start the lightweight task scheduler thread
     CScheduler::Function serviceLoop =
         std::bind(&CScheduler::serviceQueue, &scheduler);
     threadGroup.create_thread(std::bind(&TraceThread<CScheduler::Function>,
                                         "scheduler", serviceLoop));
 
     // Gather some entropy once per minute.
     scheduler.scheduleEvery(
         [] {
             RandAddPeriodic();
             return true;
         },
         60000);
 
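     // Deliver validation interface callbacks asynchronously via the scheduler
     // thread.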
     GetMainSignals().RegisterBackgroundSignalScheduler(scheduler);
-    GetMainSignals().RegisterWithMempoolSignals(g_mempool);
 
     // Create client interfaces for wallets that are supposed to be loaded
     // according to -wallet and -disablewallet options. This only constructs
     // the interfaces; it doesn't load wallet data. Wallets actually get loaded
     // when load() and start() interface methods are called below.
     g_wallet_init_interface.Construct(node);
 
     /**
      * Register RPC commands regardless of -server setting so they will be
      * available in the GUI RPC console even if external calls are disabled.
      */
     RegisterAllRPCCommands(config, rpcServer, tableRPC);
     for (const auto &client : node.chain_clients) {
         client->registerRpcs();
     }
     g_rpc_node = &node;
 #if ENABLE_ZMQ
     RegisterZMQRPCCommands(tableRPC);
 #endif
 
     /**
      * Start the RPC server.  It will be started in "warmup" mode and not
      * process calls yet (but it will verify that the server is there and will
      * be ready later).  Warmup mode will be completed when initialisation is
      * finished.
      */
     if (gArgs.GetBoolArg("-server", false)) {
         uiInterface.InitMessage_connect(SetRPCWarmupStatus);
         if (!AppInitServers(config, httpRPCRequestProcessor)) {
             return InitError(
                 _("Unable to start HTTP server. See debug log for details.")
                     .translated);
         }
     }
 
     // Step 5: verify wallet database integrity
     for (const auto &client : node.chain_clients) {
         if (!client->verify(chainparams)) {
             return false;
         }
     }
 
     // Step 6: network initialization
 
     // Note that we absolutely cannot open any actual connections
     // until the very end ("start node") as the UTXO/block state
     // is not yet set up and may end up being set up twice if we
     // need to reindex later.
 
     assert(!node.banman);
     node.banman = std::make_unique<BanMan>(
         GetDataDir() / "banlist.dat", config.GetChainParams(), &uiInterface,
         gArgs.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME));
     assert(!node.connman);
     node.connman = std::make_unique<CConnman>(
         config, GetRand(std::numeric_limits<uint64_t>::max()),
         GetRand(std::numeric_limits<uint64_t>::max()));
 
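     // peer_logic handles peer message processing and is registered below as a
     // validation interface client.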
     node.peer_logic = std::make_unique<PeerLogicValidation>(
         node.connman.get(), node.banman.get(), scheduler,
         gArgs.GetBoolArg("-enablebip61", DEFAULT_ENABLE_BIP61));
     RegisterValidationInterface(node.peer_logic.get());
 
     // sanitize comments per BIP-0014, format user agent and check total size
     std::vector<std::string> uacomments;
     for (const std::string &cmt : gArgs.GetArgs("-uacomment")) {
         if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT)) {
             return InitError(strprintf(
                 _("User Agent comment (%s) contains unsafe characters.")
                     .translated,
                 cmt));
         }
         uacomments.push_back(cmt);
     }
     const std::string strSubVersion =
         FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments);
     if (strSubVersion.size() > MAX_SUBVERSION_LENGTH) {
         return InitError(strprintf(
             _("Total length of network version string (%i) exceeds maximum "
               "length (%i). Reduce the number or size of uacomments.")
                 .translated,
             strSubVersion.size(), MAX_SUBVERSION_LENGTH));
     }
 
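     // With -onlynet, mark every network not explicitly listed as unreachable.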
     if (gArgs.IsArgSet("-onlynet")) {
         std::set<enum Network> nets;
         for (const std::string &snet : gArgs.GetArgs("-onlynet")) {
             enum Network net = ParseNetwork(snet);
             if (net == NET_UNROUTABLE) {
                 return InitError(strprintf(
                     _("Unknown network specified in -onlynet: '%s'").translated,
                     snet));
             }
             nets.insert(net);
         }
         for (int n = 0; n < NET_MAX; n++) {
             enum Network net = (enum Network)n;
             if (!nets.count(net)) {
                 SetReachable(net, false);
             }
         }
     }
 
     // Check whether host lookups are allowed before parsing any
     // network-related parameters.
     fNameLookup = gArgs.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP);
 
     bool proxyRandomize =
         gArgs.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE);
     // -proxy sets a proxy for all outgoing network traffic.
     // -noproxy (or -proxy=0) as well as the empty string can be used to not
     // set a proxy; this is the default.
     std::string proxyArg = gArgs.GetArg("-proxy", "");
     SetReachable(NET_ONION, false);
     if (proxyArg != "" && proxyArg != "0") {
         CService proxyAddr;
         if (!Lookup(proxyArg.c_str(), proxyAddr, 9050, fNameLookup)) {
             return InitError(strprintf(
                 _("Invalid -proxy address or hostname: '%s'").translated,
                 proxyArg));
         }
 
         proxyType addrProxy = proxyType(proxyAddr, proxyRandomize);
         if (!addrProxy.IsValid()) {
             return InitError(strprintf(
                 _("Invalid -proxy address or hostname: '%s'").translated,
                 proxyArg));
         }
 
         SetProxy(NET_IPV4, addrProxy);
         SetProxy(NET_IPV6, addrProxy);
         SetProxy(NET_ONION, addrProxy);
         SetNameProxy(addrProxy);
         // by default, -proxy sets onion as reachable, unless -noonion later
         SetReachable(NET_ONION, true);
     }
 
     // -onion can be used to set only a proxy for .onion, or override normal
     // proxy for .onion addresses.
     // -noonion (or -onion=0) disables connecting to .onion entirely. An empty
     // string is used to not override the onion proxy (in which case it defaults
     // to -proxy set above, or none)
     std::string onionArg = gArgs.GetArg("-onion", "");
     if (onionArg != "") {
         if (onionArg == "0") {
             // Handle -noonion/-onion=0
             SetReachable(NET_ONION, false);
         } else {
             CService onionProxy;
             if (!Lookup(onionArg.c_str(), onionProxy, 9050, fNameLookup)) {
                 return InitError(strprintf(
                     _("Invalid -onion address or hostname: '%s'").translated,
                     onionArg));
             }
             proxyType addrOnion = proxyType(onionProxy, proxyRandomize);
             if (!addrOnion.IsValid()) {
                 return InitError(strprintf(
                     _("Invalid -onion address or hostname: '%s'").translated,
                     onionArg));
             }
             SetProxy(NET_ONION, addrOnion);
             SetReachable(NET_ONION, true);
         }
     }
 
     // see Step 2: parameter interactions for more information about these
     fListen = gArgs.GetBoolArg("-listen", DEFAULT_LISTEN);
     fDiscover = gArgs.GetBoolArg("-discover", true);
     g_relay_txes = !gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
 
     for (const std::string &strAddr : gArgs.GetArgs("-externalip")) {
         CService addrLocal;
         if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) &&
             addrLocal.IsValid()) {
             AddLocal(addrLocal, LOCAL_MANUAL);
         } else {
             return InitError(ResolveErrMsg("externalip", strAddr));
         }
     }
 
 #if ENABLE_ZMQ
     g_zmq_notification_interface = CZMQNotificationInterface::Create();
 
     if (g_zmq_notification_interface) {
         RegisterValidationInterface(g_zmq_notification_interface);
     }
 #endif
     // unlimited unless -maxuploadtarget is set
     uint64_t nMaxOutboundLimit = 0;
     uint64_t nMaxOutboundTimeframe = MAX_UPLOAD_TIMEFRAME;
 
     if (gArgs.IsArgSet("-maxuploadtarget")) {
         nMaxOutboundLimit =
             gArgs.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET) * 1024 *
             1024;
     }
 
     // Step 6.5: Initialize Avalanche.
     g_avalanche = std::make_unique<AvalancheProcessor>(node.connman.get());
 
     // Step 7: load block chain
 
     fReindex = gArgs.GetBoolArg("-reindex", false);
     bool fReindexChainState = gArgs.GetBoolArg("-reindex-chainstate", false);
 
     // cache size calculations
     int64_t nTotalCache = (gArgs.GetArg("-dbcache", nDefaultDbCache) << 20);
     // total cache cannot be less than nMinDbCache
     nTotalCache = std::max(nTotalCache, nMinDbCache << 20);
     // total cache cannot be greater than nMaxDbCache
     nTotalCache = std::min(nTotalCache, nMaxDbCache << 20);
     int64_t nBlockTreeDBCache =
         std::min(nTotalCache / 8, nMaxBlockDBCache << 20);
     nTotalCache -= nBlockTreeDBCache;
     int64_t nTxIndexCache =
         std::min(nTotalCache / 8, gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)
                                       ? nMaxTxIndexCache << 20
                                       : 0);
     nTotalCache -= nTxIndexCache;
     int64_t filter_index_cache = 0;
     if (!g_enabled_filter_types.empty()) {
         size_t n_indexes = g_enabled_filter_types.size();
         int64_t max_cache =
             std::min(nTotalCache / 8, max_filter_index_cache << 20);
         filter_index_cache = max_cache / n_indexes;
         nTotalCache -= filter_index_cache * n_indexes;
     }
     // use 25%-50% of the remainder for disk cache
     int64_t nCoinDBCache =
         std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23));
     // cap total coins db cache
     nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20);
     nTotalCache -= nCoinDBCache;
     // the rest goes to in-memory cache
     nCoinCacheUsage = nTotalCache;
     int64_t nMempoolSizeMax =
         gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
     LogPrintf("Cache configuration:\n");
     LogPrintf("* Using %.1fMiB for block index database\n",
               nBlockTreeDBCache * (1.0 / 1024 / 1024));
     if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
         LogPrintf("* Using %.1fMiB for transaction index database\n",
                   nTxIndexCache * (1.0 / 1024 / 1024));
     }
     for (BlockFilterType filter_type : g_enabled_filter_types) {
         LogPrintf("* Using %.1f MiB for %s block filter index database\n",
                   filter_index_cache * (1.0 / 1024 / 1024),
                   BlockFilterTypeName(filter_type));
     }
     LogPrintf("* Using %.1fMiB for chain state database\n",
               nCoinDBCache * (1.0 / 1024 / 1024));
     LogPrintf("* Using %.1fMiB for in-memory UTXO set (plus up to %.1fMiB of "
               "unused mempool space)\n",
               nCoinCacheUsage * (1.0 / 1024 / 1024),
               nMempoolSizeMax * (1.0 / 1024 / 1024));
 
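     // Load the block index and chain state, offering to rebuild the databases
     // (reindex) if loading fails.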
     int64_t nStart = 0;
     bool fLoaded = false;
     while (!fLoaded && !ShutdownRequested()) {
         bool fReset = fReindex;
         std::string strLoadError;
 
         uiInterface.InitMessage(_("Loading block index...").translated);
         nStart = GetTimeMillis();
         do {
             try {
                 LOCK(cs_main);
                 UnloadBlockIndex();
                 pcoinsTip.reset();
                 pcoinsdbview.reset();
                 pcoinscatcher.reset();
                 pblocktree.reset(
                     new CBlockTreeDB(nBlockTreeDBCache, false, fReset));
 
                 if (fReset) {
                     pblocktree->WriteReindexing(true);
                     // If we're reindexing in prune mode, wipe away unusable
                     // block files and all undo data files
                     if (fPruneMode) {
                         CleanupBlockRevFiles();
                     }
                 }
 
                 if (ShutdownRequested()) {
                     break;
                 }
 
                 const Consensus::Params &params = chainparams.GetConsensus();
 
                 // LoadBlockIndex will load fHavePruned if we've ever removed a
                 // block file from disk.
                 // Note that it also sets fReindex based on the disk flag!
                 // From here on out fReindex and fReset mean something
                 // different!
                 if (!LoadBlockIndex(params)) {
                     strLoadError = _("Error loading block database").translated;
                     break;
                 }
 
                 // If the loaded chain has a wrong genesis, bail out immediately
                 // (we're likely using a testnet datadir, or the other way
                 // around).
                 if (!mapBlockIndex.empty() &&
                     !LookupBlockIndex(params.hashGenesisBlock)) {
                     return InitError(_("Incorrect or no genesis block found. "
                                        "Wrong datadir for network?")
                                          .translated);
                 }
 
                 // Check for changed -prune state.  What we are concerned about
                 // is a user who has pruned blocks in the past, but is now
                 // trying to run unpruned.
                 if (fHavePruned && !fPruneMode) {
                     strLoadError =
                         _("You need to rebuild the database using -reindex to "
                           "go back to unpruned mode.  This will redownload the "
                           "entire blockchain")
                             .translated;
                     break;
                 }
 
                 // At this point blocktree args are consistent with what's on
                 // disk. If we're not mid-reindex (based on disk + args), add a
                 // genesis block on disk (otherwise we use the one already on
                 // disk).
                 // This is called again in ThreadImport after the reindex
                 // completes.
                 if (!fReindex && !LoadGenesisBlock(chainparams)) {
                     strLoadError =
                         _("Error initializing block database").translated;
                     break;
                 }
 
                 // At this point we're either in reindex or we've loaded a
                 // useful block tree into mapBlockIndex!
 
                 pcoinsdbview.reset(new CCoinsViewDB(
                     nCoinDBCache, false, fReset || fReindexChainState));
                 pcoinscatcher.reset(
                     new CCoinsViewErrorCatcher(pcoinsdbview.get()));
 
                 // If necessary, upgrade from an older database format.
                 // This is a no-op if we cleared the coinsviewdb with -reindex
                 // or -reindex-chainstate
                 if (!pcoinsdbview->Upgrade()) {
                     strLoadError =
                         _("Error upgrading chainstate database").translated;
                     break;
                 }
 
                 // ReplayBlocks is a no-op if we cleared the coinsviewdb with
                 // -reindex or -reindex-chainstate
                 if (!ReplayBlocks(params, pcoinsdbview.get())) {
                     strLoadError =
                         _("Unable to replay blocks. You will need to rebuild "
                           "the database using -reindex-chainstate.")
                             .translated;
                     break;
                 }
 
                 // The on-disk coinsdb is now in a good state; create the cache
                 pcoinsTip.reset(new CCoinsViewCache(pcoinscatcher.get()));
 
                 bool is_coinsview_empty = fReset || fReindexChainState ||
                                           pcoinsTip->GetBestBlock().IsNull();
                 if (!is_coinsview_empty) {
                     // LoadChainTip sets ::ChainActive() based on pcoinsTip's
                     // best block
                     if (!LoadChainTip(config)) {
                         strLoadError =
                             _("Error initializing block database").translated;
                         break;
                     }
                     assert(::ChainActive().Tip() != nullptr);
 
                     uiInterface.InitMessage(
                         _("Verifying blocks...").translated);
                     if (fHavePruned &&
                         gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) >
                             MIN_BLOCKS_TO_KEEP) {
                         LogPrintf(
                             "Prune: pruned datadir may not have more than %d "
                             "blocks; only checking available blocks\n",
                             MIN_BLOCKS_TO_KEEP);
                     }
 
                     CBlockIndex *tip = ::ChainActive().Tip();
                     RPCNotifyBlockChange(true, tip);
                     if (tip && tip->nTime >
                                    GetAdjustedTime() + MAX_FUTURE_BLOCK_TIME) {
                         strLoadError =
                             _("The block database contains a block which "
                               "appears to be from the future. This may be due "
                               "to your computer's date and time being set "
                               "incorrectly. Only rebuild the block database if "
                               "you are sure that your computer's date and time "
                               "are correct")
                                 .translated;
                         break;
                     }
 
                     if (!CVerifyDB().VerifyDB(
                             config, pcoinsdbview.get(),
                             gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL),
                             gArgs.GetArg("-checkblocks",
                                          DEFAULT_CHECKBLOCKS))) {
                         strLoadError =
                             _("Corrupted block database detected").translated;
                         break;
                     }
                 }
             } catch (const std::exception &e) {
                 LogPrintf("%s\n", e.what());
                 strLoadError = _("Error opening block database").translated;
                 break;
             }
 
             fLoaded = true;
         } while (false);
 
         if (!fLoaded && !ShutdownRequested()) {
             // first suggest a reindex
             if (!fReset) {
                 bool fRet = uiInterface.ThreadSafeQuestion(
                     strLoadError + ".\n\n" +
                         _("Do you want to rebuild the block database now?")
                             .translated,
                     strLoadError + ".\nPlease restart with -reindex or "
                                    "-reindex-chainstate to recover.",
                     "",
                     CClientUIInterface::MSG_ERROR |
                         CClientUIInterface::BTN_ABORT);
                 if (fRet) {
                     fReindex = true;
                     AbortShutdown();
                 } else {
                     LogPrintf("Aborted block database rebuild. Exiting.\n");
                     return false;
                 }
             } else {
                 return InitError(strLoadError);
             }
         }
     }
 
     // As LoadBlockIndex can take several minutes, it's possible the user
     // requested to kill the GUI during the last operation. If so, exit.
     // As the program has not fully started yet, Shutdown() is possibly
     // overkill.
     if (ShutdownRequested()) {
         LogPrintf("Shutdown requested. Exiting.\n");
         return false;
     }
     if (fLoaded) {
         LogPrintf(" block index %15dms\n", GetTimeMillis() - nStart);
     }
 
     // Encode addresses using cashaddr instead of base58.
     // We do this by default to avoid confusion with BTC addresses.
     config.SetCashAddrEncoding(gArgs.GetBoolArg("-usecashaddr", true));
 
     // Now that the chain state is loaded, make mempool generally available in
     // the node context. For example the connection manager, wallet, or RPC
     // threads, which are all started after this, may use it from the node
     // context.
     assert(!node.mempool);
     node.mempool = &::g_mempool;
 
     // Step 8: load indexers
     if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
         g_txindex = std::make_unique<TxIndex>(nTxIndexCache, false, fReindex);
         g_txindex->Start();
     }
 
     for (const auto &filter_type : g_enabled_filter_types) {
         InitBlockFilterIndex(filter_type, filter_index_cache, false, fReindex);
         GetBlockFilterIndex(filter_type)->Start();
     }
 
     // Step 9: load wallet
     for (const auto &client : node.chain_clients) {
         if (!client->load(chainparams)) {
             return false;
         }
     }
 
     // Step 10: data directory maintenance
 
     // if pruning, unset the service bit and perform the initial blockstore
     // prune after any wallet rescanning has taken place.
     if (fPruneMode) {
         LogPrintf("Unsetting NODE_NETWORK on prune mode\n");
         nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK);
         if (!fReindex) {
             uiInterface.InitMessage(_("Pruning blockstore...").translated);
             ::ChainstateActive().PruneAndFlush();
         }
     }
 
     // Step 11: import blocks
     if (!CheckDiskSpace(GetDataDir())) {
         InitError(strprintf(_("Error: Disk space is low for %s").translated,
                             GetDataDir()));
         return false;
     }
     if (!CheckDiskSpace(GetBlocksDir())) {
         InitError(strprintf(_("Error: Disk space is low for %s").translated,
                             GetBlocksDir()));
         return false;
     }
 
     // Either install a handler to notify us when genesis activates, or set
     // fHaveGenesis directly.
     // No locking, as this happens before any background thread is started.
     if (::ChainActive().Tip() == nullptr) {
         uiInterface.NotifyBlockTip_connect(BlockNotifyGenesisWait);
     } else {
         fHaveGenesis = true;
     }
 
     if (gArgs.IsArgSet("-blocknotify")) {
         uiInterface.NotifyBlockTip_connect(BlockNotifyCallback);
     }
 
     std::vector<fs::path> vImportFiles;
     for (const std::string &strFile : gArgs.GetArgs("-loadblock")) {
         vImportFiles.push_back(strFile);
     }
 
     threadGroup.create_thread(
         std::bind(&ThreadImport, std::ref(config), vImportFiles));
 
     // Wait for genesis block to be processed
     {
         WAIT_LOCK(g_genesis_wait_mutex, lock);
         // We previously could hang here if StartShutdown() is called prior to
         // ThreadImport getting started, so instead we just wait on a timer to
         // check ShutdownRequested() regularly.
         while (!fHaveGenesis && !ShutdownRequested()) {
             g_genesis_wait_cv.wait_for(lock, std::chrono::milliseconds(500));
         }
         uiInterface.NotifyBlockTip_disconnect(BlockNotifyGenesisWait);
     }
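
    // The wait above is the usual "poll a condition variable with a timeout"
    // pattern. A minimal self-contained sketch with plain std primitives (the
    // code above uses the project's WAIT_LOCK wrapper and g_genesis_wait_cv
    // instead):
    //
    //   std::mutex m;
    //   std::condition_variable cv;
    //   bool done = false;
    //
    //   std::unique_lock<std::mutex> lock(m);
    //   while (!done && !ShutdownRequested()) {
    //       // Wake up regularly so a shutdown request is noticed even if no
    //       // notification ever arrives.
    //       cv.wait_for(lock, std::chrono::milliseconds(500));
    //   }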
 
     if (ShutdownRequested()) {
         return false;
     }
 
     // Step 12: start node
 
     int chain_active_height;
 
     //// debug print
     {
         LOCK(cs_main);
         LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size());
         chain_active_height = ::ChainActive().Height();
     }
     LogPrintf("nBestHeight = %d\n", chain_active_height);
 
     if (gArgs.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) {
         StartTorControl();
     }
 
     Discover();
 
     // Map ports with UPnP
     if (gArgs.GetBoolArg("-upnp", DEFAULT_UPNP)) {
         StartMapPort();
     }
 
     CConnman::Options connOptions;
     connOptions.nLocalServices = nLocalServices;
     connOptions.nMaxConnections = nMaxConnections;
     connOptions.nMaxOutbound =
         std::min(MAX_OUTBOUND_CONNECTIONS, connOptions.nMaxConnections);
     connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS;
     connOptions.nMaxFeeler = 1;
     connOptions.nBestHeight = chain_active_height;
     connOptions.uiInterface = &uiInterface;
     connOptions.m_banman = node.banman.get();
     connOptions.m_msgproc = node.peer_logic.get();
     connOptions.nSendBufferMaxSize =
         1000 * gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER);
     connOptions.nReceiveFloodSize =
         1000 * gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER);
     connOptions.m_added_nodes = gArgs.GetArgs("-addnode");
 
     connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe;
     connOptions.nMaxOutboundLimit = nMaxOutboundLimit;
     connOptions.m_peer_connect_timeout = peer_connect_timeout;
 
     for (const std::string &strBind : gArgs.GetArgs("-bind")) {
         CService addrBind;
         if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) {
             return InitError(ResolveErrMsg("bind", strBind));
         }
         connOptions.vBinds.push_back(addrBind);
     }
 
     for (const std::string &strBind : gArgs.GetArgs("-whitebind")) {
         NetWhitebindPermissions whitebind;
         std::string error;
         if (!NetWhitebindPermissions::TryParse(strBind, whitebind, error)) {
             return InitError(error);
         }
         connOptions.vWhiteBinds.push_back(whitebind);
     }
 
     for (const auto &net : gArgs.GetArgs("-whitelist")) {
         NetWhitelistPermissions subnet;
         std::string error;
         if (!NetWhitelistPermissions::TryParse(net, subnet, error)) {
             return InitError(error);
         }
         connOptions.vWhitelistedRange.push_back(subnet);
     }
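
    // The three loops above only parse and collect these options; the actual
    // sockets are bound later by CConnman::Start(). Illustrative values (the
    // permission names and the permissions@address syntax are assumptions
    // based on the NetWhitebindPermissions/NetWhitelistPermissions parsers):
    //
    //   -bind=0.0.0.0:8333
    //   -whitebind=bloomfilter,relay@127.0.0.1:8334
    //   -whitelist=relay@192.168.0.0/24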
 
     connOptions.vSeedNodes = gArgs.GetArgs("-seednode");
 
     // Initiate outbound connections unless connect=0
     connOptions.m_use_addrman_outgoing = !gArgs.IsArgSet("-connect");
     if (!connOptions.m_use_addrman_outgoing) {
         const auto connect = gArgs.GetArgs("-connect");
         if (connect.size() != 1 || connect[0] != "0") {
             connOptions.m_specified_outgoing = connect;
         }
     }
     if (!node.connman->Start(scheduler, connOptions)) {
         return false;
     }
 
     // Step 13: finished
 
     SetRPCWarmupFinished();
     uiInterface.InitMessage(_("Done loading").translated);
 
     for (const auto &client : node.chain_clients) {
         client->start(scheduler);
     }
 
     BanMan *banman = node.banman.get();
     scheduler.scheduleEvery(
         [banman] {
             banman->DumpBanlist();
             return true;
         },
         DUMP_BANS_INTERVAL * 1000);
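
    // scheduleEvery() keeps rescheduling the callback while it returns true,
    // with the period given in milliseconds. A hypothetical extra periodic
    // task would follow the same shape (sketch only; the log line is made
    // up):
    //
    //   scheduler.scheduleEvery(
    //       [] {
    //           LogPrint(BCLog::NET, "periodic housekeeping\n");
    //           return true; // keep repeating
    //       },
    //       60 * 1000); // once a minute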
 
     // Start Avalanche's event loop.
     g_avalanche->startEventLoop(scheduler);
 
     return true;
 }
diff --git a/src/test/policyestimator_tests.cpp b/src/test/policyestimator_tests.cpp
index 206fa71b1..d3cbe4d9d 100644
--- a/src/test/policyestimator_tests.cpp
+++ b/src/test/policyestimator_tests.cpp
@@ -1,101 +1,99 @@
 // Copyright (c) 2011-2019 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <policy/fees.h>
 #include <policy/policy.h>
 
 #include <txmempool.h>
 #include <uint256.h>
 #include <util/system.h>
 #include <util/time.h>
 
 #include <test/util/setup_common.h>
 
 #include <boost/test/unit_test.hpp>
 
-BOOST_FIXTURE_TEST_SUITE(policyestimator_tests, BasicTestingSetup)
+BOOST_FIXTURE_TEST_SUITE(policyestimator_tests, TestingSetup)
 
 BOOST_AUTO_TEST_CASE(MempoolMinimumFeeEstimate) {
     CTxMemPool mpool;
     LOCK2(cs_main, mpool.cs);
     TestMemPoolEntryHelper entry;
 
     // Create a transaction template
     CScript garbage;
     for (unsigned int i = 0; i < 128; i++) {
         garbage.push_back('X');
     }
 
     CMutableTransaction tx;
     tx.vin.resize(1);
     tx.vin[0].scriptSig = garbage;
     tx.vout.resize(1);
     tx.vout[0].nValue = Amount::zero();
 
     // Create a fake block
     std::vector<CTransactionRef> block;
     int blocknum = 0;
 
    // Loop through 200 blocks adding transactions so we have an estimateFee
     // that is calculable.
     while (blocknum < 200) {
         for (int64_t j = 0; j < 100; j++) {
             // make transaction unique
             tx.vin[0].nSequence = 10000 * blocknum + j;
             TxId txid = tx.GetId();
             mpool.addUnchecked(
                 entry.Fee((j + 1) * DEFAULT_BLOCK_MIN_TX_FEE_PER_KB)
                     .Time(GetTime())
                     .Height(blocknum)
                     .FromTx(tx));
             CTransactionRef ptx = mpool.get(txid);
             block.push_back(ptx);
         }
         mpool.removeForBlock(block, ++blocknum);
         block.clear();
     }
 
     // Check that the estimate is above the rolling minimum fee. This should be
     // true since we have not trimmed the mempool.
     BOOST_CHECK(mpool.GetMinFee(1) <= mpool.estimateFee());
 
     // Check that estimateFee returns the minimum rolling fee even when the
     // mempool grows very quickly and no blocks have been mined.
 
    // Add a bunch of low-fee transactions which are not yet in the mempool
    // and have zero fees.
     CMutableTransaction mtx;
     tx.vin.resize(1);
     tx.vin[0].scriptSig = garbage;
     tx.vout.resize(1);
     block.clear();
 
     // Add tons of transactions to the mempool,
     // but don't mine them.
     for (int64_t i = 0; i < 10000; i++) {
         // Mutate the hash
         tx.vin[0].nSequence = 10000 * blocknum + i;
        // Add a new transaction to the mempool with an increasing fee.
        // The average fee should end up as 1/2 * 10000 *
        // DEFAULT_BLOCK_MIN_TX_FEE_PER_KB.
         mpool.addUnchecked(entry.Fee((i + 1) * DEFAULT_BLOCK_MIN_TX_FEE_PER_KB)
                                .Time(GetTime())
                                .Height(blocknum)
                                .FromTx(tx));
     }
 
-    // Trim to size.  GetMinFee should be more than 10000 *
-    // DEFAULT_BLOCK_MIN_TX_FEE_PER_KB But the estimateFee should be
+    // Trim to size. GetMinFee should be more than 10000 *
+    // DEFAULT_BLOCK_MIN_TX_FEE_PER_KB, but the estimateFee should remain
     // unchanged.
     mpool.TrimToSize(1);
-
     BOOST_CHECK(mpool.GetMinFee(1) >=
                 CFeeRate(10000 * DEFAULT_BLOCK_MIN_TX_FEE_PER_KB,
                          CTransaction(tx).GetTotalSize()));
-
     BOOST_CHECK_MESSAGE(mpool.estimateFee() == mpool.GetMinFee(1),
                         "Confirm blocks has failed");
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 06eb08a93..acc8f4848 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -1,1422 +1,1429 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <txmempool.h>
 
 #include <chain.h>
 #include <chainparams.h> // for GetConsensus.
 #include <clientversion.h>
 #include <config.h>
 #include <consensus/consensus.h>
 #include <consensus/tx_verify.h>
 #include <consensus/validation.h>
 #include <policy/fees.h>
 #include <policy/policy.h>
 #include <policy/settings.h>
 #include <reverse_iterator.h>
 #include <util/moneystr.h>
 #include <util/system.h>
 #include <util/time.h>
 #include <validation.h>
+#include <validationinterface.h>
 #include <version.h>
 
 #include <algorithm>
 
 CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef &_tx, const Amount _nFee,
                                  int64_t _nTime, unsigned int _entryHeight,
                                  bool _spendsCoinbase, int64_t _sigOpsCount,
                                  LockPoints lp)
     : tx(_tx), nFee(_nFee), nTxSize(tx->GetTotalSize()),
       nUsageSize(RecursiveDynamicUsage(tx)), nTime(_nTime),
       entryHeight(_entryHeight), spendsCoinbase(_spendsCoinbase),
       sigOpCount(_sigOpsCount), lockPoints(lp) {
     nCountWithDescendants = 1;
     nSizeWithDescendants = GetTxSize();
     nSigOpCountWithDescendants = sigOpCount;
     nModFeesWithDescendants = nFee;
 
     feeDelta = Amount::zero();
 
     nCountWithAncestors = 1;
     nSizeWithAncestors = GetTxSize();
     nModFeesWithAncestors = nFee;
     nSigOpCountWithAncestors = sigOpCount;
 }
 
 size_t CTxMemPoolEntry::GetTxVirtualSize() const {
     return GetVirtualTransactionSize(nTxSize, sigOpCount);
 }
 
 uint64_t CTxMemPoolEntry::GetVirtualSizeWithDescendants() const {
     // note this is distinct from the sum of descendants' individual virtual
     // sizes, and may be smaller.
     return GetVirtualTransactionSize(nSizeWithDescendants,
                                      nSigOpCountWithDescendants);
 }
 
 uint64_t CTxMemPoolEntry::GetVirtualSizeWithAncestors() const {
     // note this is distinct from the sum of ancestors' individual virtual
     // sizes, and may be smaller.
     return GetVirtualTransactionSize(nSizeWithAncestors,
                                      nSigOpCountWithAncestors);
 }
 
 void CTxMemPoolEntry::UpdateFeeDelta(Amount newFeeDelta) {
     nModFeesWithDescendants += newFeeDelta - feeDelta;
     nModFeesWithAncestors += newFeeDelta - feeDelta;
     feeDelta = newFeeDelta;
 }
 
 void CTxMemPoolEntry::UpdateLockPoints(const LockPoints &lp) {
     lockPoints = lp;
 }
 
 // Update the given tx for any in-mempool descendants.
 // Assumes that setMemPoolChildren is correct for the given tx and all
 // descendants.
 void CTxMemPool::UpdateForDescendants(txiter updateIt,
                                       cacheMap &cachedDescendants,
                                       const std::set<TxId> &setExclude) {
     setEntries stageEntries, setAllDescendants;
     stageEntries = GetMemPoolChildren(updateIt);
 
     while (!stageEntries.empty()) {
         const txiter cit = *stageEntries.begin();
         setAllDescendants.insert(cit);
         stageEntries.erase(cit);
         const setEntries &setChildren = GetMemPoolChildren(cit);
         for (txiter childEntry : setChildren) {
             cacheMap::iterator cacheIt = cachedDescendants.find(childEntry);
             if (cacheIt != cachedDescendants.end()) {
                 // We've already calculated this one, just add the entries for
                 // this set but don't traverse again.
                 for (txiter cacheEntry : cacheIt->second) {
                     setAllDescendants.insert(cacheEntry);
                 }
             } else if (!setAllDescendants.count(childEntry)) {
                 // Schedule for later processing
                 stageEntries.insert(childEntry);
             }
         }
     }
     // setAllDescendants now contains all in-mempool descendants of updateIt.
     // Update and add to cached descendant map
     int64_t modifySize = 0;
     int64_t modifyCount = 0;
     Amount modifyFee = Amount::zero();
     int64_t modifySigOpCount = 0;
     for (txiter cit : setAllDescendants) {
         if (!setExclude.count(cit->GetTx().GetId())) {
             modifySize += cit->GetTxSize();
             modifyFee += cit->GetModifiedFee();
             modifyCount++;
             modifySigOpCount += cit->GetSigOpCount();
             cachedDescendants[updateIt].insert(cit);
             // Update ancestor state for each descendant
             mapTx.modify(cit,
                          update_ancestor_state(updateIt->GetTxSize(),
                                                updateIt->GetModifiedFee(), 1,
                                                updateIt->GetSigOpCount()));
         }
     }
     mapTx.modify(updateIt,
                  update_descendant_state(modifySize, modifyFee, modifyCount,
                                          modifySigOpCount));
 }
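
// Worked example of the descendant cache above (illustrative): suppose the
// disconnected block re-added A, B and C, where B spends A and C spends B,
// and C also has an in-mempool child D that was not in the block. Since the
// txids are processed in reverse order, C is updated first and
// cachedDescendants[C] ends up as {D}. When A is updated later, the walk
// reaches C as a child of B, finds the cached set and pulls in D directly
// instead of traversing C's children again; B and C are in setExclude, so
// only D contributes to A's descendant totals.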
 
 // txidsToUpdate is the set of transaction hashes from a disconnected block
 // which has been re-added to the mempool. For each entry, look for descendants
 // that are outside txidsToUpdate, and add fee/size information for such
 // descendants to the parent. For each such descendant, also update the ancestor
 // state to include the parent.
 void CTxMemPool::UpdateTransactionsFromBlock(
     const std::vector<TxId> &txidsToUpdate) {
     LOCK(cs);
     // For each entry in txidsToUpdate, store the set of in-mempool, but not
     // in-txidsToUpdate transactions, so that we don't have to recalculate
     // descendants when we come across a previously seen entry.
     cacheMap mapMemPoolDescendantsToUpdate;
 
     // Use a set for lookups into txidsToUpdate (these entries are already
     // accounted for in the state of their ancestors)
     std::set<TxId> setAlreadyIncluded(txidsToUpdate.begin(),
                                       txidsToUpdate.end());
 
     // Iterate in reverse, so that whenever we are looking at a transaction
     // we are sure that all in-mempool descendants have already been processed.
     // This maximizes the benefit of the descendant cache and guarantees that
     // setMemPoolChildren will be updated, an assumption made in
     // UpdateForDescendants.
     for (const TxId &txid : reverse_iterate(txidsToUpdate)) {
         // we cache the in-mempool children to avoid duplicate updates
         setEntries setChildren;
         // calculate children from mapNextTx
         txiter it = mapTx.find(txid);
         if (it == mapTx.end()) {
             continue;
         }
 
         auto iter = mapNextTx.lower_bound(COutPoint(txid, 0));
         // First calculate the children, and update setMemPoolChildren to
         // include them, and update their setMemPoolParents to include this tx.
         for (; iter != mapNextTx.end() && iter->first->GetTxId() == txid;
              ++iter) {
             const TxId &childTxId = iter->second->GetId();
             txiter childIter = mapTx.find(childTxId);
             assert(childIter != mapTx.end());
             // We can skip updating entries we've encountered before or that are
             // in the block (which are already accounted for).
             if (setChildren.insert(childIter).second &&
                 !setAlreadyIncluded.count(childTxId)) {
                 UpdateChild(it, childIter, true);
                 UpdateParent(childIter, it, true);
             }
         }
         UpdateForDescendants(it, mapMemPoolDescendantsToUpdate,
                              setAlreadyIncluded);
     }
 }
 
 bool CTxMemPool::CalculateMemPoolAncestors(
     const CTxMemPoolEntry &entry, setEntries &setAncestors,
     uint64_t limitAncestorCount, uint64_t limitAncestorSize,
     uint64_t limitDescendantCount, uint64_t limitDescendantSize,
     std::string &errString, bool fSearchForParents /* = true */) const {
     setEntries parentHashes;
     const CTransaction &tx = entry.GetTx();
 
     if (fSearchForParents) {
         // Get parents of this transaction that are in the mempool
         // GetMemPoolParents() is only valid for entries in the mempool, so we
         // iterate mapTx to find parents.
         for (const CTxIn &in : tx.vin) {
             boost::optional<txiter> piter = GetIter(in.prevout.GetTxId());
             if (!piter) {
                 continue;
             }
             parentHashes.insert(*piter);
             if (parentHashes.size() + 1 > limitAncestorCount) {
                 errString =
                     strprintf("too many unconfirmed parents [limit: %u]",
                               limitAncestorCount);
                 return false;
             }
         }
     } else {
         // If we're not searching for parents, we require this to be an entry in
         // the mempool already.
         txiter it = mapTx.iterator_to(entry);
         parentHashes = GetMemPoolParents(it);
     }
 
     size_t totalSizeWithAncestors = entry.GetTxSize();
 
     while (!parentHashes.empty()) {
         txiter stageit = *parentHashes.begin();
 
         setAncestors.insert(stageit);
         parentHashes.erase(stageit);
         totalSizeWithAncestors += stageit->GetTxSize();
 
         if (stageit->GetSizeWithDescendants() + entry.GetTxSize() >
             limitDescendantSize) {
             errString = strprintf(
                 "exceeds descendant size limit for tx %s [limit: %u]",
                 stageit->GetTx().GetId().ToString(), limitDescendantSize);
             return false;
         }
 
         if (stageit->GetCountWithDescendants() + 1 > limitDescendantCount) {
             errString = strprintf("too many descendants for tx %s [limit: %u]",
                                   stageit->GetTx().GetId().ToString(),
                                   limitDescendantCount);
             return false;
         }
 
         if (totalSizeWithAncestors > limitAncestorSize) {
             errString = strprintf("exceeds ancestor size limit [limit: %u]",
                                   limitAncestorSize);
             return false;
         }
 
         const setEntries &setMemPoolParents = GetMemPoolParents(stageit);
         for (txiter phash : setMemPoolParents) {
             // If this is a new ancestor, add it.
             if (setAncestors.count(phash) == 0) {
                 parentHashes.insert(phash);
             }
             if (parentHashes.size() + setAncestors.size() + 1 >
                 limitAncestorCount) {
                 errString =
                     strprintf("too many unconfirmed ancestors [limit: %u]",
                               limitAncestorCount);
                 return false;
             }
         }
     }
 
     return true;
 }
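
// Callers in this file that already hold a consistent view of the pool simply
// disable all four limits. A minimal sketch of that calling pattern (mirrors
// the uses in UpdateForRemoveFromMempool and addUnchecked below; "pool" is a
// placeholder for a CTxMemPool reference):
//
//   CTxMemPool::setEntries ancestors;
//   std::string err;
//   const uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
//   pool.CalculateMemPoolAncestors(entry, ancestors, nNoLimit, nNoLimit,
//                                  nNoLimit, nNoLimit, err);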
 
 void CTxMemPool::UpdateAncestorsOf(bool add, txiter it,
                                    setEntries &setAncestors) {
     setEntries parentIters = GetMemPoolParents(it);
     // add or remove this tx as a child of each parent
     for (txiter piter : parentIters) {
         UpdateChild(piter, it, add);
     }
     const int64_t updateCount = (add ? 1 : -1);
     const int64_t updateSize = updateCount * it->GetTxSize();
     const int64_t updateSigOpCount = updateCount * it->GetSigOpCount();
     const Amount updateFee = updateCount * it->GetModifiedFee();
     for (txiter ancestorIt : setAncestors) {
         mapTx.modify(ancestorIt,
                      update_descendant_state(updateSize, updateFee, updateCount,
                                              updateSigOpCount));
     }
 }
 
 void CTxMemPool::UpdateEntryForAncestors(txiter it,
                                          const setEntries &setAncestors) {
     int64_t updateCount = setAncestors.size();
     int64_t updateSize = 0;
     int64_t updateSigOpsCount = 0;
     Amount updateFee = Amount::zero();
 
     for (txiter ancestorIt : setAncestors) {
         updateSize += ancestorIt->GetTxSize();
         updateFee += ancestorIt->GetModifiedFee();
         updateSigOpsCount += ancestorIt->GetSigOpCount();
     }
     mapTx.modify(it, update_ancestor_state(updateSize, updateFee, updateCount,
                                            updateSigOpsCount));
 }
 
 void CTxMemPool::UpdateChildrenForRemoval(txiter it) {
     const setEntries &setMemPoolChildren = GetMemPoolChildren(it);
     for (txiter updateIt : setMemPoolChildren) {
         UpdateParent(updateIt, it, false);
     }
 }
 
 void CTxMemPool::UpdateForRemoveFromMempool(const setEntries &entriesToRemove,
                                             bool updateDescendants) {
     // For each entry, walk back all ancestors and decrement size associated
     // with this transaction.
     const uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
     if (updateDescendants) {
         // updateDescendants should be true whenever we're not recursively
         // removing a tx and all its descendants, eg when a transaction is
         // confirmed in a block. Here we only update statistics and not data in
         // mapLinks (which we need to preserve until we're finished with all
         // operations that need to traverse the mempool).
         for (txiter removeIt : entriesToRemove) {
             setEntries setDescendants;
             CalculateDescendants(removeIt, setDescendants);
             setDescendants.erase(removeIt); // don't update state for self
             int64_t modifySize = -int64_t(removeIt->GetTxSize());
             Amount modifyFee = -1 * removeIt->GetModifiedFee();
             int modifySigOps = -removeIt->GetSigOpCount();
             for (txiter dit : setDescendants) {
                 mapTx.modify(dit, update_ancestor_state(modifySize, modifyFee,
                                                         -1, modifySigOps));
             }
         }
     }
 
     for (txiter removeIt : entriesToRemove) {
         setEntries setAncestors;
         const CTxMemPoolEntry &entry = *removeIt;
         std::string dummy;
         // Since this is a tx that is already in the mempool, we can call CMPA
         // with fSearchForParents = false.  If the mempool is in a consistent
         // state, then using true or false should both be correct, though false
         // should be a bit faster.
         // However, if we happen to be in the middle of processing a reorg, then
         // the mempool can be in an inconsistent state. In this case, the set of
         // ancestors reachable via mapLinks will be the same as the set of
         // ancestors whose packages include this transaction, because when we
         // add a new transaction to the mempool in addUnchecked(), we assume it
         // has no children, and in the case of a reorg where that assumption is
         // false, the in-mempool children aren't linked to the in-block tx's
         // until UpdateTransactionsFromBlock() is called. So if we're being
         // called during a reorg, ie before UpdateTransactionsFromBlock() has
         // been called, then mapLinks[] will differ from the set of mempool
         // parents we'd calculate by searching, and it's important that we use
         // the mapLinks[] notion of ancestor transactions as the set of things
         // to update for removal.
         CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit,
                                   nNoLimit, nNoLimit, dummy, false);
         // Note that UpdateAncestorsOf severs the child links that point to
         // removeIt in the entries for the parents of removeIt.
         UpdateAncestorsOf(false, removeIt, setAncestors);
     }
     // After updating all the ancestor sizes, we can now sever the link between
     // each transaction being removed and any mempool children (ie, update
     // setMemPoolParents for each direct child of a transaction being removed).
     for (txiter removeIt : entriesToRemove) {
         UpdateChildrenForRemoval(removeIt);
     }
 }
 
 void CTxMemPoolEntry::UpdateDescendantState(int64_t modifySize,
                                             Amount modifyFee,
                                             int64_t modifyCount,
                                             int64_t modifySigOpCount) {
     nSizeWithDescendants += modifySize;
     assert(int64_t(nSizeWithDescendants) > 0);
     nModFeesWithDescendants += modifyFee;
     nCountWithDescendants += modifyCount;
     assert(int64_t(nCountWithDescendants) > 0);
     nSigOpCountWithDescendants += modifySigOpCount;
     assert(int64_t(nSigOpCountWithDescendants) >= 0);
 }
 
 void CTxMemPoolEntry::UpdateAncestorState(int64_t modifySize, Amount modifyFee,
                                           int64_t modifyCount,
                                           int64_t modifySigOps) {
     nSizeWithAncestors += modifySize;
     assert(int64_t(nSizeWithAncestors) > 0);
     nModFeesWithAncestors += modifyFee;
     nCountWithAncestors += modifyCount;
     assert(int64_t(nCountWithAncestors) > 0);
     nSigOpCountWithAncestors += modifySigOps;
     assert(int(nSigOpCountWithAncestors) >= 0);
 }
 
 CTxMemPool::CTxMemPool() : nTransactionsUpdated(0) {
     // lock free clear
     _clear();
 
     // Sanity checks off by default for performance, because otherwise accepting
     // transactions becomes O(N^2) where N is the number of transactions in the
     // pool
     nCheckFrequency = 0;
 }
 
 CTxMemPool::~CTxMemPool() {}
 
 bool CTxMemPool::isSpent(const COutPoint &outpoint) const {
     LOCK(cs);
     return mapNextTx.count(outpoint);
 }
 
 unsigned int CTxMemPool::GetTransactionsUpdated() const {
     LOCK(cs);
     return nTransactionsUpdated;
 }
 
 void CTxMemPool::AddTransactionsUpdated(unsigned int n) {
     LOCK(cs);
     nTransactionsUpdated += n;
 }
 
 void CTxMemPool::addUnchecked(const CTxMemPoolEntry &entry,
                               setEntries &setAncestors) {
     NotifyEntryAdded(entry.GetSharedTx());
     // Add to memory pool without checking anything.
     // Used by AcceptToMemoryPool(), which DOES do all the appropriate checks.
     indexed_transaction_set::iterator newit = mapTx.insert(entry).first;
     mapLinks.insert(make_pair(newit, TxLinks()));
 
     // Update transaction for any feeDelta created by PrioritiseTransaction
     // TODO: refactor so that the fee delta is calculated before inserting into
     // mapTx.
     Amount feeDelta = Amount::zero();
     ApplyDelta(entry.GetTx().GetId(), feeDelta);
     if (feeDelta != Amount::zero()) {
         mapTx.modify(newit, update_fee_delta(feeDelta));
     }
 
     // Update cachedInnerUsage to include contained transaction's usage.
     // (When we update the entry for in-mempool parents, memory usage will be
     // further updated.)
     cachedInnerUsage += entry.DynamicMemoryUsage();
 
     const CTransaction &tx = newit->GetTx();
     std::set<TxId> setParentTransactions;
     for (const CTxIn &in : tx.vin) {
         mapNextTx.insert(std::make_pair(&in.prevout, &tx));
         setParentTransactions.insert(in.prevout.GetTxId());
     }
     // Don't bother worrying about child transactions of this one. Normal case
     // of a new transaction arriving is that there can't be any children,
     // because such children would be orphans. An exception to that is if a
     // transaction enters that used to be in a block. In that case, our
     // disconnect block logic will call UpdateTransactionsFromBlock to clean up
     // the mess we're leaving here.
 
     // Update ancestors with information about this tx
     for (const auto &pit : GetIterSet(setParentTransactions)) {
         UpdateParent(newit, pit, true);
     }
     UpdateAncestorsOf(true, newit, setAncestors);
     UpdateEntryForAncestors(newit, setAncestors);
 
     nTransactionsUpdated++;
     totalTxSize += entry.GetTxSize();
 
     vTxHashes.emplace_back(tx.GetHash(), newit);
     newit->vTxHashesIdx = vTxHashes.size() - 1;
 }
 
 void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) {
+    CTransactionRef ptx = it->GetSharedTx();
+    NotifyEntryRemoved(ptx, reason);
+    if (reason != MemPoolRemovalReason::BLOCK &&
+        reason != MemPoolRemovalReason::CONFLICT) {
+        GetMainSignals().TransactionRemovedFromMempool(ptx);
+    }
+
-    NotifyEntryRemoved(it->GetSharedTx(), reason);
     for (const CTxIn &txin : it->GetTx().vin) {
         mapNextTx.erase(txin.prevout);
     }
 
     if (vTxHashes.size() > 1) {
         vTxHashes[it->vTxHashesIdx] = std::move(vTxHashes.back());
         vTxHashes[it->vTxHashesIdx].second->vTxHashesIdx = it->vTxHashesIdx;
         vTxHashes.pop_back();
         if (vTxHashes.size() * 2 < vTxHashes.capacity()) {
             vTxHashes.shrink_to_fit();
         }
     } else {
         vTxHashes.clear();
     }
 
     totalTxSize -= it->GetTxSize();
     cachedInnerUsage -= it->DynamicMemoryUsage();
     cachedInnerUsage -= memusage::DynamicUsage(mapLinks[it].parents) +
                         memusage::DynamicUsage(mapLinks[it].children);
     mapLinks.erase(it);
     mapTx.erase(it);
     nTransactionsUpdated++;
 }
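
// The TransactionRemovedFromMempool notification added above is delivered via
// the validation interface. A minimal sketch of a listener (the class name is
// hypothetical; registration would go through RegisterValidationInterface()):
//
//   class MempoolEvictionLogger final : public CValidationInterface {
//   protected:
//       void TransactionRemovedFromMempool(const CTransactionRef &ptx) override {
//           LogPrint(BCLog::MEMPOOL, "removed %s from mempool\n",
//                    ptx->GetId().ToString());
//       }
//   };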
 
 // Calculates descendants of entry that are not already in setDescendants, and
 // adds to setDescendants. Assumes entryit is already a tx in the mempool and
 // setMemPoolChildren is correct for tx and all descendants. Also assumes that
 // if an entry is in setDescendants already, then all in-mempool descendants of
 // it are already in setDescendants as well, so that we can save time by not
 // iterating over those entries.
 void CTxMemPool::CalculateDescendants(txiter entryit,
                                       setEntries &setDescendants) const {
     setEntries stage;
     if (setDescendants.count(entryit) == 0) {
         stage.insert(entryit);
     }
     // Traverse down the children of entry, only adding children that are not
     // accounted for in setDescendants already (because those children have
     // either already been walked, or will be walked in this iteration).
     while (!stage.empty()) {
         txiter it = *stage.begin();
         setDescendants.insert(it);
         stage.erase(it);
 
         const setEntries &setChildren = GetMemPoolChildren(it);
         for (txiter childiter : setChildren) {
             if (!setDescendants.count(childiter)) {
                 stage.insert(childiter);
             }
         }
     }
 }
 
 void CTxMemPool::removeRecursive(const CTransaction &origTx,
                                  MemPoolRemovalReason reason) {
     // Remove transaction from memory pool.
     LOCK(cs);
     setEntries txToRemove;
     txiter origit = mapTx.find(origTx.GetId());
     if (origit != mapTx.end()) {
         txToRemove.insert(origit);
     } else {
         // When recursively removing but origTx isn't in the mempool be sure to
         // remove any children that are in the pool. This can happen during
         // chain re-orgs if origTx isn't re-accepted into the mempool for any
         // reason.
         for (size_t i = 0; i < origTx.vout.size(); i++) {
             auto it = mapNextTx.find(COutPoint(origTx.GetId(), i));
             if (it == mapNextTx.end()) {
                 continue;
             }
 
             txiter nextit = mapTx.find(it->second->GetId());
             assert(nextit != mapTx.end());
             txToRemove.insert(nextit);
         }
     }
 
     setEntries setAllRemoves;
     for (txiter it : txToRemove) {
         CalculateDescendants(it, setAllRemoves);
     }
 
     RemoveStaged(setAllRemoves, false, reason);
 }
 
 void CTxMemPool::removeForReorg(const Config &config,
                                 const CCoinsViewCache *pcoins,
                                 unsigned int nMemPoolHeight, int flags) {
     // Remove transactions spending a coinbase which are now immature and
     // no-longer-final transactions.
     LOCK(cs);
     setEntries txToRemove;
     for (indexed_transaction_set::const_iterator it = mapTx.begin();
          it != mapTx.end(); it++) {
         const CTransaction &tx = it->GetTx();
         LockPoints lp = it->GetLockPoints();
         bool validLP = TestLockPointValidity(&lp);
 
         CValidationState state;
         if (!ContextualCheckTransactionForCurrentBlock(
                 config.GetChainParams().GetConsensus(), tx, state, flags) ||
             !CheckSequenceLocks(*this, tx, flags, &lp, validLP)) {
             // Note if CheckSequenceLocks fails the LockPoints may still be
             // invalid. So it's critical that we remove the tx and not depend on
             // the LockPoints.
             txToRemove.insert(it);
         } else if (it->GetSpendsCoinbase()) {
             for (const CTxIn &txin : tx.vin) {
                 indexed_transaction_set::const_iterator it2 =
                     mapTx.find(txin.prevout.GetTxId());
                 if (it2 != mapTx.end()) {
                     continue;
                 }
 
                 const Coin &coin = pcoins->AccessCoin(txin.prevout);
                 if (nCheckFrequency != 0) {
                     assert(!coin.IsSpent());
                 }
 
                 if (coin.IsSpent() ||
                     (coin.IsCoinBase() &&
                      int64_t(nMemPoolHeight) - coin.GetHeight() <
                          COINBASE_MATURITY)) {
                     txToRemove.insert(it);
                     break;
                 }
             }
         }
         if (!validLP) {
             mapTx.modify(it, update_lock_points(lp));
         }
     }
     setEntries setAllRemoves;
     for (txiter it : txToRemove) {
         CalculateDescendants(it, setAllRemoves);
     }
     RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG);
 }
 
 void CTxMemPool::removeConflicts(const CTransaction &tx) {
     // Remove transactions which depend on inputs of tx, recursively
     AssertLockHeld(cs);
     for (const CTxIn &txin : tx.vin) {
         auto it = mapNextTx.find(txin.prevout);
         if (it != mapNextTx.end()) {
             const CTransaction &txConflict = *it->second;
             if (txConflict != tx) {
                 ClearPrioritisation(txConflict.GetId());
                 removeRecursive(txConflict, MemPoolRemovalReason::CONFLICT);
             }
         }
     }
 }
 
 /**
  * Called when a block is connected. Removes from mempool and updates the miner
  * fee estimator.
  */
 void CTxMemPool::removeForBlock(const std::vector<CTransactionRef> &vtx,
                                 unsigned int nBlockHeight) {
     LOCK(cs);
 
     DisconnectedBlockTransactions disconnectpool;
     disconnectpool.addForBlock(vtx);
 
     std::vector<const CTxMemPoolEntry *> entries;
     for (const CTransactionRef &tx :
          reverse_iterate(disconnectpool.GetQueuedTx().get<insertion_order>())) {
         const TxId &txid = tx->GetId();
 
         indexed_transaction_set::iterator i = mapTx.find(txid);
         if (i != mapTx.end()) {
             entries.push_back(&*i);
         }
     }
 
     for (const CTransactionRef &tx :
          reverse_iterate(disconnectpool.GetQueuedTx().get<insertion_order>())) {
         txiter it = mapTx.find(tx->GetId());
         if (it != mapTx.end()) {
             setEntries stage;
             stage.insert(it);
             RemoveStaged(stage, true, MemPoolRemovalReason::BLOCK);
         }
         removeConflicts(*tx);
         ClearPrioritisation(tx->GetId());
     }
 
     disconnectpool.clear();
 
     lastRollingFeeUpdate = GetTime();
     blockSinceLastRollingFeeBump = true;
 }
 
 void CTxMemPool::_clear() {
     mapLinks.clear();
     mapTx.clear();
     mapNextTx.clear();
     vTxHashes.clear();
     totalTxSize = 0;
     cachedInnerUsage = 0;
     lastRollingFeeUpdate = GetTime();
     blockSinceLastRollingFeeBump = false;
     rollingMinimumFeeRate = 0;
     ++nTransactionsUpdated;
 }
 
 void CTxMemPool::clear() {
     LOCK(cs);
     _clear();
 }
 
 static void CheckInputsAndUpdateCoins(const CTransaction &tx,
                                       CCoinsViewCache &mempoolDuplicate,
                                       const int64_t spendheight) {
     CValidationState state;
     Amount txfee = Amount::zero();
     bool fCheckResult =
         tx.IsCoinBase() || Consensus::CheckTxInputs(tx, state, mempoolDuplicate,
                                                     spendheight, txfee);
     assert(fCheckResult);
     UpdateCoins(mempoolDuplicate, tx, std::numeric_limits<int>::max());
 }
 
 void CTxMemPool::check(const CCoinsViewCache *pcoins) const {
     LOCK(cs);
     if (nCheckFrequency == 0) {
         return;
     }
 
     if (GetRand(std::numeric_limits<uint32_t>::max()) >= nCheckFrequency) {
         return;
     }
 
     LogPrint(BCLog::MEMPOOL,
              "Checking mempool with %u transactions and %u inputs\n",
              (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size());
 
     uint64_t checkTotal = 0;
     uint64_t innerUsage = 0;
 
     CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache *>(pcoins));
     const int64_t spendheight = GetSpendHeight(mempoolDuplicate);
 
     std::list<const CTxMemPoolEntry *> waitingOnDependants;
     for (indexed_transaction_set::const_iterator it = mapTx.begin();
          it != mapTx.end(); it++) {
         unsigned int i = 0;
         checkTotal += it->GetTxSize();
         innerUsage += it->DynamicMemoryUsage();
         const CTransaction &tx = it->GetTx();
         txlinksMap::const_iterator linksiter = mapLinks.find(it);
         assert(linksiter != mapLinks.end());
         const TxLinks &links = linksiter->second;
         innerUsage += memusage::DynamicUsage(links.parents) +
                       memusage::DynamicUsage(links.children);
         bool fDependsWait = false;
         setEntries setParentCheck;
         for (const CTxIn &txin : tx.vin) {
             // Check that every mempool transaction's inputs refer to available
             // coins, or other mempool tx's.
             indexed_transaction_set::const_iterator it2 =
                 mapTx.find(txin.prevout.GetTxId());
             if (it2 != mapTx.end()) {
                 const CTransaction &tx2 = it2->GetTx();
                 assert(tx2.vout.size() > txin.prevout.GetN() &&
                        !tx2.vout[txin.prevout.GetN()].IsNull());
                 fDependsWait = true;
                 setParentCheck.insert(it2);
             } else {
                 assert(pcoins->HaveCoin(txin.prevout));
             }
             // Check whether its inputs are marked in mapNextTx.
             auto it3 = mapNextTx.find(txin.prevout);
             assert(it3 != mapNextTx.end());
             assert(it3->first == &txin.prevout);
             assert(it3->second == &tx);
             i++;
         }
         assert(setParentCheck == GetMemPoolParents(it));
         // Verify ancestor state is correct.
         setEntries setAncestors;
         uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
         std::string dummy;
         CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit,
                                   nNoLimit, nNoLimit, dummy);
         uint64_t nCountCheck = setAncestors.size() + 1;
         uint64_t nSizeCheck = it->GetTxSize();
         Amount nFeesCheck = it->GetModifiedFee();
         int64_t nSigOpCheck = it->GetSigOpCount();
 
         for (txiter ancestorIt : setAncestors) {
             nSizeCheck += ancestorIt->GetTxSize();
             nFeesCheck += ancestorIt->GetModifiedFee();
             nSigOpCheck += ancestorIt->GetSigOpCount();
         }
 
         assert(it->GetCountWithAncestors() == nCountCheck);
         assert(it->GetSizeWithAncestors() == nSizeCheck);
         assert(it->GetSigOpCountWithAncestors() == nSigOpCheck);
         assert(it->GetModFeesWithAncestors() == nFeesCheck);
 
         // Check children against mapNextTx
         CTxMemPool::setEntries setChildrenCheck;
         auto iter = mapNextTx.lower_bound(COutPoint(it->GetTx().GetId(), 0));
         uint64_t child_sizes = 0;
         int64_t child_sigop_counts = 0;
         for (; iter != mapNextTx.end() &&
                iter->first->GetTxId() == it->GetTx().GetId();
              ++iter) {
             txiter childit = mapTx.find(iter->second->GetId());
             // mapNextTx points to in-mempool transactions
             assert(childit != mapTx.end());
             if (setChildrenCheck.insert(childit).second) {
                 child_sizes += childit->GetTxSize();
                 child_sigop_counts += childit->GetSigOpCount();
             }
         }
         assert(setChildrenCheck == GetMemPoolChildren(it));
         // Also check to make sure size is greater than sum with immediate
         // children. Just a sanity check, not definitive that this calc is
         // correct...
         assert(it->GetSizeWithDescendants() >= child_sizes + it->GetTxSize());
         assert(it->GetSigOpCountWithDescendants() >=
                child_sigop_counts + it->GetSigOpCount());
 
         if (fDependsWait) {
             waitingOnDependants.push_back(&(*it));
         } else {
             CheckInputsAndUpdateCoins(tx, mempoolDuplicate, spendheight);
         }
     }
 
     unsigned int stepsSinceLastRemove = 0;
     while (!waitingOnDependants.empty()) {
         const CTxMemPoolEntry *entry = waitingOnDependants.front();
         waitingOnDependants.pop_front();
         if (!mempoolDuplicate.HaveInputs(entry->GetTx())) {
             waitingOnDependants.push_back(entry);
             stepsSinceLastRemove++;
             assert(stepsSinceLastRemove < waitingOnDependants.size());
         } else {
             CheckInputsAndUpdateCoins(entry->GetTx(), mempoolDuplicate,
                                       spendheight);
             stepsSinceLastRemove = 0;
         }
     }
 
     for (auto it = mapNextTx.cbegin(); it != mapNextTx.cend(); it++) {
         const TxId &txid = it->second->GetId();
        indexed_transaction_set::const_iterator it2 = mapTx.find(txid);
        // Check the iterator before dereferencing it.
        assert(it2 != mapTx.end());
        const CTransaction &tx = it2->GetTx();
        assert(&tx == it->second);
     }
 
     assert(totalTxSize == checkTotal);
     assert(innerUsage == cachedInnerUsage);
 }
 
 bool CTxMemPool::CompareDepthAndScore(const TxId &txida, const TxId &txidb) {
     LOCK(cs);
     indexed_transaction_set::const_iterator i = mapTx.find(txida);
     if (i == mapTx.end()) {
         return false;
     }
     indexed_transaction_set::const_iterator j = mapTx.find(txidb);
     if (j == mapTx.end()) {
         return true;
     }
     uint64_t counta = i->GetCountWithAncestors();
     uint64_t countb = j->GetCountWithAncestors();
     if (counta == countb) {
         return CompareTxMemPoolEntryByScore()(*i, *j);
     }
     return counta < countb;
 }
 
 namespace {
 class DepthAndScoreComparator {
 public:
     bool
     operator()(const CTxMemPool::indexed_transaction_set::const_iterator &a,
                const CTxMemPool::indexed_transaction_set::const_iterator &b) {
         uint64_t counta = a->GetCountWithAncestors();
         uint64_t countb = b->GetCountWithAncestors();
         if (counta == countb) {
             return CompareTxMemPoolEntryByScore()(*a, *b);
         }
         return counta < countb;
     }
 };
 } // namespace
 
 std::vector<CTxMemPool::indexed_transaction_set::const_iterator>
 CTxMemPool::GetSortedDepthAndScore() const {
     std::vector<indexed_transaction_set::const_iterator> iters;
     AssertLockHeld(cs);
 
     iters.reserve(mapTx.size());
     for (indexed_transaction_set::iterator mi = mapTx.begin();
          mi != mapTx.end(); ++mi) {
         iters.push_back(mi);
     }
 
     std::sort(iters.begin(), iters.end(), DepthAndScoreComparator());
     return iters;
 }
 
 void CTxMemPool::queryHashes(std::vector<uint256> &vtxid) const {
     LOCK(cs);
     auto iters = GetSortedDepthAndScore();
 
     vtxid.clear();
     vtxid.reserve(mapTx.size());
 
     for (auto it : iters) {
         vtxid.push_back(it->GetTx().GetId());
     }
 }
 
 static TxMempoolInfo
 GetInfo(CTxMemPool::indexed_transaction_set::const_iterator it) {
     return TxMempoolInfo{it->GetSharedTx(), it->GetTime(),
                          CFeeRate(it->GetFee(), it->GetTxSize()),
                          it->GetModifiedFee() - it->GetFee()};
 }
 
 std::vector<TxMempoolInfo> CTxMemPool::infoAll() const {
     LOCK(cs);
     auto iters = GetSortedDepthAndScore();
 
     std::vector<TxMempoolInfo> ret;
     ret.reserve(mapTx.size());
     for (auto it : iters) {
         ret.push_back(GetInfo(it));
     }
 
     return ret;
 }
 
 CTransactionRef CTxMemPool::get(const TxId &txid) const {
     LOCK(cs);
     indexed_transaction_set::const_iterator i = mapTx.find(txid);
     if (i == mapTx.end()) {
         return nullptr;
     }
 
     return i->GetSharedTx();
 }
 
 TxMempoolInfo CTxMemPool::info(const TxId &txid) const {
     LOCK(cs);
     indexed_transaction_set::const_iterator i = mapTx.find(txid);
     if (i == mapTx.end()) {
         return TxMempoolInfo();
     }
 
     return GetInfo(i);
 }
 
 CFeeRate CTxMemPool::estimateFee() const {
     LOCK(cs);
 
     uint64_t maxMempoolSize =
         gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000;
    // minerPolicy uses recent blocks to figure out a reasonable fee. This may
    // disagree with the rollingMinimumFeeRate under certain scenarios where
    // the mempool increases rapidly, or blocks are being mined which do not
    // contain propagated transactions.
     return std::max(::minRelayTxFee, GetMinFee(maxMempoolSize));
 }
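
// Concretely: -maxmempool is given in MB and scaled to bytes above, and the
// returned estimate is simply the larger of the static relay floor
// (::minRelayTxFee) and the dynamic eviction floor (GetMinFee). For example,
// once the rolling minimum fee has decayed below the relay floor because the
// mempool has stayed well under its limit, callers get the relay floor back.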
 
 void CTxMemPool::PrioritiseTransaction(const TxId &txid,
                                        const Amount nFeeDelta) {
     {
         LOCK(cs);
         Amount &delta = mapDeltas[txid];
         delta += nFeeDelta;
         txiter it = mapTx.find(txid);
         if (it != mapTx.end()) {
             mapTx.modify(it, update_fee_delta(delta));
             // Now update all ancestors' modified fees with descendants
             setEntries setAncestors;
             uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
             std::string dummy;
             CalculateMemPoolAncestors(*it, setAncestors, nNoLimit, nNoLimit,
                                       nNoLimit, nNoLimit, dummy, false);
             for (txiter ancestorIt : setAncestors) {
                 mapTx.modify(ancestorIt,
                              update_descendant_state(0, nFeeDelta, 0, 0));
             }
 
             // Now update all descendants' modified fees with ancestors
             setEntries setDescendants;
             CalculateDescendants(it, setDescendants);
             setDescendants.erase(it);
             for (txiter descendantIt : setDescendants) {
                 mapTx.modify(descendantIt,
                              update_ancestor_state(0, nFeeDelta, 0, 0));
             }
             ++nTransactionsUpdated;
         }
     }
     LogPrintf("PrioritiseTransaction: %s fee += %s\n", txid.ToString(),
               FormatMoney(nFeeDelta));
 }
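
// The delta is additive across calls and, as shown above, is propagated into
// ancestor and descendant package fees as well. A minimal usage sketch (txid
// is a placeholder for a transaction already known to the caller):
//
//   g_mempool.PrioritiseTransaction(txid, 1000 * SATOSHI);  // bump by 1000 sat
//   g_mempool.PrioritiseTransaction(txid, -1000 * SATOSHI); // and undo it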
 
 void CTxMemPool::ApplyDelta(const TxId &txid, Amount &nFeeDelta) const {
     LOCK(cs);
     std::map<TxId, Amount>::const_iterator pos = mapDeltas.find(txid);
     if (pos == mapDeltas.end()) {
         return;
     }
 
     nFeeDelta += pos->second;
 }
 
 void CTxMemPool::ClearPrioritisation(const TxId &txid) {
     LOCK(cs);
     mapDeltas.erase(txid);
 }
 
 const CTransaction *CTxMemPool::GetConflictTx(const COutPoint &prevout) const {
     const auto it = mapNextTx.find(prevout);
     return it == mapNextTx.end() ? nullptr : it->second;
 }
 
 boost::optional<CTxMemPool::txiter>
 CTxMemPool::GetIter(const TxId &txid) const {
     auto it = mapTx.find(txid);
     if (it != mapTx.end()) {
         return it;
     }
     return boost::optional<txiter>{};
 }
 
 CTxMemPool::setEntries
 CTxMemPool::GetIterSet(const std::set<TxId> &txids) const {
     CTxMemPool::setEntries ret;
     for (const auto &txid : txids) {
         const auto mi = GetIter(txid);
         if (mi) {
             ret.insert(*mi);
         }
     }
     return ret;
 }
 
 bool CTxMemPool::HasNoInputsOf(const CTransaction &tx) const {
     for (const CTxIn &in : tx.vin) {
         if (exists(in.prevout.GetTxId())) {
             return false;
         }
     }
 
     return true;
 }
 
 CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView *baseIn,
                                      const CTxMemPool &mempoolIn)
     : CCoinsViewBacked(baseIn), mempool(mempoolIn) {}
 
 bool CCoinsViewMemPool::GetCoin(const COutPoint &outpoint, Coin &coin) const {
     // If an entry in the mempool exists, always return that one, as it's
     // guaranteed to never conflict with the underlying cache, and it cannot
    // have pruned entries (as it contains full transactions). First checking
     // the underlying cache risks returning a pruned entry instead.
     CTransactionRef ptx = mempool.get(outpoint.GetTxId());
     if (ptx) {
         if (outpoint.GetN() < ptx->vout.size()) {
             coin = Coin(ptx->vout[outpoint.GetN()], MEMPOOL_HEIGHT, false);
             return true;
         }
         return false;
     }
     return base->GetCoin(outpoint, coin);
 }
 
 size_t CTxMemPool::DynamicMemoryUsage() const {
     LOCK(cs);
     // Estimate the overhead of mapTx to be 12 pointers + an allocation, as no
    // exact formula for boost::multi_index_container is implemented.
     return memusage::MallocUsage(sizeof(CTxMemPoolEntry) +
                                  12 * sizeof(void *)) *
                mapTx.size() +
            memusage::DynamicUsage(mapNextTx) +
            memusage::DynamicUsage(mapDeltas) +
            memusage::DynamicUsage(mapLinks) +
            memusage::DynamicUsage(vTxHashes) + cachedInnerUsage;
 }
 
 void CTxMemPool::RemoveStaged(setEntries &stage, bool updateDescendants,
                               MemPoolRemovalReason reason) {
     AssertLockHeld(cs);
     UpdateForRemoveFromMempool(stage, updateDescendants);
     for (txiter it : stage) {
         removeUnchecked(it, reason);
     }
 }
 
 int CTxMemPool::Expire(int64_t time) {
     LOCK(cs);
     indexed_transaction_set::index<entry_time>::type::iterator it =
         mapTx.get<entry_time>().begin();
     setEntries toremove;
     while (it != mapTx.get<entry_time>().end() && it->GetTime() < time) {
         toremove.insert(mapTx.project<0>(it));
         it++;
     }
 
     setEntries stage;
     for (txiter removeit : toremove) {
         CalculateDescendants(removeit, stage);
     }
 
     RemoveStaged(stage, false, MemPoolRemovalReason::EXPIRY);
     return stage.size();
 }
 
 void CTxMemPool::LimitSize(size_t limit, unsigned long age) {
     int expired = Expire(GetTime() - age);
     if (expired != 0) {
         LogPrint(BCLog::MEMPOOL,
                  "Expired %i transactions from the memory pool\n", expired);
     }
 
     std::vector<COutPoint> vNoSpendsRemaining;
     TrimToSize(limit, &vNoSpendsRemaining);
     for (const COutPoint &removed : vNoSpendsRemaining) {
         pcoinsTip->Uncache(removed);
     }
 }
 
 void CTxMemPool::addUnchecked(const CTxMemPoolEntry &entry) {
     setEntries setAncestors;
     uint64_t nNoLimit = std::numeric_limits<uint64_t>::max();
     std::string dummy;
     CalculateMemPoolAncestors(entry, setAncestors, nNoLimit, nNoLimit, nNoLimit,
                               nNoLimit, dummy);
     return addUnchecked(entry, setAncestors);
 }
 
 void CTxMemPool::UpdateChild(txiter entry, txiter child, bool add) {
     setEntries s;
     if (add && mapLinks[entry].children.insert(child).second) {
         cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
     } else if (!add && mapLinks[entry].children.erase(child)) {
         cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
     }
 }
 
 void CTxMemPool::UpdateParent(txiter entry, txiter parent, bool add) {
     setEntries s;
     if (add && mapLinks[entry].parents.insert(parent).second) {
         cachedInnerUsage += memusage::IncrementalDynamicUsage(s);
     } else if (!add && mapLinks[entry].parents.erase(parent)) {
         cachedInnerUsage -= memusage::IncrementalDynamicUsage(s);
     }
 }
 
 const CTxMemPool::setEntries &
 CTxMemPool::GetMemPoolParents(txiter entry) const {
     assert(entry != mapTx.end());
     txlinksMap::const_iterator it = mapLinks.find(entry);
     assert(it != mapLinks.end());
     return it->second.parents;
 }
 
 const CTxMemPool::setEntries &
 CTxMemPool::GetMemPoolChildren(txiter entry) const {
     assert(entry != mapTx.end());
     txlinksMap::const_iterator it = mapLinks.find(entry);
     assert(it != mapLinks.end());
     return it->second.children;
 }
 
 CFeeRate CTxMemPool::GetMinFee(size_t sizelimit) const {
     LOCK(cs);
     if (!blockSinceLastRollingFeeBump || rollingMinimumFeeRate == 0) {
         return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI);
     }
 
     int64_t time = GetTime();
     if (time > lastRollingFeeUpdate + 10) {
         double halflife = ROLLING_FEE_HALFLIFE;
         if (DynamicMemoryUsage() < sizelimit / 4) {
             halflife /= 4;
         } else if (DynamicMemoryUsage() < sizelimit / 2) {
             halflife /= 2;
         }
 
         rollingMinimumFeeRate =
             rollingMinimumFeeRate /
             pow(2.0, (time - lastRollingFeeUpdate) / halflife);
         lastRollingFeeUpdate = time;
     }
     return CFeeRate(int64_t(ceill(rollingMinimumFeeRate)) * SATOSHI);
 }
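
// Worked example of the decay above: once a block has been mined since the
// last bump and more than 10 seconds have passed, a rolling rate R becomes
// roughly R / 2^(elapsed / halflife). If the mempool is using less than a
// quarter of its size limit the halflife is divided by 4, so the fee floor
// decays four times as fast when the pool is nearly empty.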
 
 void CTxMemPool::trackPackageRemoved(const CFeeRate &rate) {
     AssertLockHeld(cs);
     if ((rate.GetFeePerK() / SATOSHI) > rollingMinimumFeeRate) {
         rollingMinimumFeeRate = rate.GetFeePerK() / SATOSHI;
         blockSinceLastRollingFeeBump = false;
     }
 }
 
 void CTxMemPool::TrimToSize(size_t sizelimit,
                             std::vector<COutPoint> *pvNoSpendsRemaining) {
     LOCK(cs);
 
     unsigned nTxnRemoved = 0;
     CFeeRate maxFeeRateRemoved(Amount::zero());
     while (!mapTx.empty() && DynamicMemoryUsage() > sizelimit) {
         indexed_transaction_set::index<descendant_score>::type::iterator it =
             mapTx.get<descendant_score>().begin();
 
         // We set the new mempool min fee to the feerate of the removed set,
         // plus the "minimum reasonable fee rate" (ie some value under which we
         // consider txn to have 0 fee). This way, we don't allow txn to enter
         // mempool with feerate equal to txn which were removed with no block in
         // between.
         CFeeRate removed(it->GetModFeesWithDescendants(),
                          it->GetVirtualSizeWithDescendants());
         removed += MEMPOOL_FULL_FEE_INCREMENT;
 
         trackPackageRemoved(removed);
         maxFeeRateRemoved = std::max(maxFeeRateRemoved, removed);
 
         setEntries stage;
         CalculateDescendants(mapTx.project<0>(it), stage);
         nTxnRemoved += stage.size();
 
         std::vector<CTransaction> txn;
         if (pvNoSpendsRemaining) {
             txn.reserve(stage.size());
             for (txiter iter : stage) {
                 txn.push_back(iter->GetTx());
             }
         }
         RemoveStaged(stage, false, MemPoolRemovalReason::SIZELIMIT);
         if (pvNoSpendsRemaining) {
             for (const CTransaction &tx : txn) {
                 for (const CTxIn &txin : tx.vin) {
                     if (exists(txin.prevout.GetTxId())) {
                         continue;
                     }
                     pvNoSpendsRemaining->push_back(txin.prevout);
                 }
             }
         }
     }
 
     if (maxFeeRateRemoved > CFeeRate(Amount::zero())) {
         LogPrint(BCLog::MEMPOOL,
                  "Removed %u txn, rolling minimum fee bumped to %s\n",
                  nTxnRemoved, maxFeeRateRemoved.ToString());
     }
 }
 
 uint64_t CTxMemPool::CalculateDescendantMaximum(txiter entry) const {
     // find parent with highest descendant count
     std::vector<txiter> candidates;
     setEntries counted;
     candidates.push_back(entry);
     uint64_t maximum = 0;
     while (candidates.size()) {
         txiter candidate = candidates.back();
         candidates.pop_back();
         if (!counted.insert(candidate).second) {
             continue;
         }
         const setEntries &parents = GetMemPoolParents(candidate);
         if (parents.size() == 0) {
             maximum = std::max(maximum, candidate->GetCountWithDescendants());
         } else {
             for (txiter i : parents) {
                 candidates.push_back(i);
             }
         }
     }
     return maximum;
 }
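CalculateDescendantMaximum walks parent links from the given entry up to transactions with no in-mempool parents and reports the largest descendant count seen among those roots, which bounds the size of the cluster the entry belongs to. A self-contained sketch of the same traversal over a toy pool (plain STL containers instead of the multi_index iterators):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    // Toy model: each tx lists its in-mempool parents and carries a precomputed
    // "count with descendants", mirroring CTxMemPoolEntry::GetCountWithDescendants().
    struct ToyEntry {
        std::vector<std::string> parents;
        uint64_t countWithDescendants;
    };

    uint64_t DescendantMaximum(const std::map<std::string, ToyEntry> &pool,
                               const std::string &start) {
        std::vector<std::string> candidates{start};
        std::set<std::string> counted;
        uint64_t maximum = 0;
        while (!candidates.empty()) {
            const std::string cur = candidates.back();
            candidates.pop_back();
            if (!counted.insert(cur).second) {
                continue; // already visited
            }
            const ToyEntry &e = pool.at(cur);
            if (e.parents.empty()) {
                // A root ancestor: its descendant count bounds the cluster size.
                maximum = std::max(maximum, e.countWithDescendants);
            } else {
                for (const std::string &p : e.parents) {
                    candidates.push_back(p);
                }
            }
        }
        return maximum;
    }

    int main() {
        // Chain A <- B <- C plus an unrelated root D; starting from C, only
        // the root A is counted, giving 3.
        const std::map<std::string, ToyEntry> pool{
            {"A", {{}, 3}}, {"B", {{"A"}, 2}}, {"C", {{"B"}, 1}}, {"D", {{}, 1}}};
        std::printf("%llu\n", (unsigned long long)DescendantMaximum(pool, "C"));
    }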
 
 void CTxMemPool::GetTransactionAncestry(const TxId &txid, size_t &ancestors,
                                         size_t &descendants) const {
     LOCK(cs);
     auto it = mapTx.find(txid);
     ancestors = descendants = 0;
     if (it != mapTx.end()) {
         ancestors = it->GetCountWithAncestors();
         descendants = CalculateDescendantMaximum(it);
     }
 }
 
 bool CTxMemPool::IsLoaded() const {
     LOCK(cs);
     return m_is_loaded;
 }
 
 void CTxMemPool::SetIsLoaded(bool loaded) {
     LOCK(cs);
     m_is_loaded = loaded;
 }
 
 SaltedTxidHasher::SaltedTxidHasher()
     : k0(GetRand(std::numeric_limits<uint64_t>::max())),
       k1(GetRand(std::numeric_limits<uint64_t>::max())) {}
 
 /** Maximum bytes for transactions to store for processing during reorg */
 static const size_t MAX_DISCONNECTED_TX_POOL_SIZE = 20 * DEFAULT_MAX_BLOCK_SIZE;
 
 void DisconnectedBlockTransactions::addForBlock(
     const std::vector<CTransactionRef> &vtx) {
     for (const auto &tx : reverse_iterate(vtx)) {
         // If we already added it, just skip.
         auto it = queuedTx.find(tx->GetId());
         if (it != queuedTx.end()) {
             continue;
         }
 
         // Insert the transaction into the pool.
         addTransaction(tx);
 
         // Fill in the set of parents.
         std::unordered_set<TxId, SaltedTxidHasher> parents;
         for (const CTxIn &in : tx->vin) {
             parents.insert(in.prevout.GetTxId());
         }
 
         // In order to make sure we keep things in topological order, we check
         // whether we already know of any parents of the current transaction.
         // If so, we remove them from the set and then add them back.
         while (parents.size() > 0) {
             std::unordered_set<TxId, SaltedTxidHasher> worklist(
                 std::move(parents));
             parents.clear();
 
             for (const TxId &txid : worklist) {
                 // If we do not have that txid in the set, nothing needs to be
                 // done.
                 auto pit = queuedTx.find(txid);
                 if (pit == queuedTx.end()) {
                     continue;
                 }
 
                 // This parent is in our set; reinsert it at the right
                 // position.
                 const CTransactionRef ptx = *pit;
                 queuedTx.erase(pit);
                 queuedTx.insert(ptx);
 
                 // And we make sure ancestors are covered.
                 for (const CTxIn &in : ptx->vin) {
                     parents.insert(in.prevout.GetTxId());
                 }
             }
         }
     }
 
     // Keep the size under control.
     while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) {
         // Drop the earliest entry, and remove its children from the
         // mempool.
         auto it = queuedTx.get<insertion_order>().begin();
         g_mempool.removeRecursive(**it, MemPoolRemovalReason::REORG);
         removeEntry(it);
     }
 }
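The reinsertion trick above is what restores topological order when the input is not already sorted: re-adding an already-queued parent pushes it behind its child in insertion order, so iterating the queue in reverse later yields parents before children. A toy illustration with string txids (not the real queuedTx types), using an input whose reverse iteration order is deliberately not topological:

    #include <algorithm>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
        // B spends A, C spends B. The "entry order" below is chosen so that
        // its reverse is not topological, to exercise the reinsertion path.
        std::map<std::string, std::vector<std::string>> parents{
            {"A", {}}, {"B", {"A"}}, {"C", {"B"}}};
        const std::vector<std::string> entryOrder{"B", "A", "C"};

        // queue models the insertion_order index: re-adding an element moves
        // it to the back, i.e. behind the child that pulled it in.
        std::vector<std::string> queue;
        auto enqueue = [&](const std::string &txid) {
            queue.erase(std::remove(queue.begin(), queue.end(), txid), queue.end());
            queue.push_back(txid);
        };

        // Walk the transactions in reverse, like addForBlock does.
        for (auto it = entryOrder.rbegin(); it != entryOrder.rend(); ++it) {
            enqueue(*it);
            // Reinsert any already-queued ancestors so they land after *it.
            std::vector<std::string> work = parents[*it];
            while (!work.empty()) {
                const std::string p = work.back();
                work.pop_back();
                if (std::find(queue.begin(), queue.end(), p) == queue.end()) {
                    continue; // not queued yet, nothing to move
                }
                enqueue(p);
                for (const std::string &gp : parents[p]) {
                    work.push_back(gp);
                }
            }
        }

        // Reverse insertion order is a valid resubmission order: prints "A B C".
        for (auto it = queue.rbegin(); it != queue.rend(); ++it) {
            std::printf("%s ", it->c_str());
        }
        std::printf("\n");
    }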
 
 void DisconnectedBlockTransactions::importMempool(CTxMemPool &pool) {
     // addForBlock's algorithm sorts a vector of transactions back into
     // topological order. We use it in a separate object to create a valid
     // ordering of all mempool transactions, which we then splice in front of
     // the current queuedTx. This results in a valid sequence of transactions to
     // be reprocessed in updateMempoolForReorg.
 
     // We create vtx in order of the entry_time index to facilitate
     // addForBlock (which iterates in reverse order), as vtx will probably end
     // up in the correct ordering for queuedTx.
     std::vector<CTransactionRef> vtx;
     {
         LOCK(pool.cs);
         vtx.reserve(pool.mapTx.size());
         for (const CTxMemPoolEntry &e : pool.mapTx.get<entry_time>()) {
             vtx.push_back(e.GetSharedTx());
         }
         pool.clear();
     }
 
     // Use addForBlock to sort the transactions and then splice them in front
     // of queuedTx.
     DisconnectedBlockTransactions orderedTxnPool;
     orderedTxnPool.addForBlock(vtx);
     cachedInnerUsage += orderedTxnPool.cachedInnerUsage;
     queuedTx.get<insertion_order>().splice(
         queuedTx.get<insertion_order>().begin(),
         orderedTxnPool.queuedTx.get<insertion_order>());
 
     // We limit memory usage because we can't know if more blocks will be
     // disconnected
     while (DynamicMemoryUsage() > MAX_DISCONNECTED_TX_POOL_SIZE) {
         // Drop the earliest entry which, by definition, has no children
         removeEntry(queuedTx.get<insertion_order>().begin());
     }
 }
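The splice above transplants the freshly ordered former-mempool entries in front of queuedTx in a single call; cachedInnerUsage is added by hand because the memory accounting has to follow the moved entries. std::list::splice expresses the same intent in isolation (toy strings rather than the sequenced multi_index index used here):

    #include <cstdio>
    #include <list>
    #include <string>

    int main() {
        // queuedTx already holds transactions from previously disconnected blocks.
        std::list<std::string> queuedTx{"old1", "old2"};

        // orderedTxnPool: the former mempool contents, re-sorted topologically.
        std::list<std::string> orderedTxnPool{"mem1", "mem2", "mem3"};

        // Splice the mempool transactions in FRONT of the existing queue; for
        // std::list the nodes are relinked rather than copied, and
        // orderedTxnPool is left empty afterwards.
        queuedTx.splice(queuedTx.begin(), orderedTxnPool);

        for (const auto &tx : queuedTx) {
            std::printf("%s ", tx.c_str());
        }
        std::printf("\n"); // mem1 mem2 mem3 old1 old2
    }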
 
 void DisconnectedBlockTransactions::updateMempoolForReorg(const Config &config,
                                                           bool fAddToMempool) {
     AssertLockHeld(cs_main);
     std::vector<TxId> txidsUpdate;
 
     // disconnectpool's insertion_order index sorts the entries from oldest to
     // newest, but the oldest entry will be the last tx from the latest mined
     // block that was disconnected.
     // Iterate disconnectpool in reverse, so that we add transactions back to
     // the mempool starting with the earliest transaction that had been
     // previously seen in a block.
     for (const CTransactionRef &tx :
          reverse_iterate(queuedTx.get<insertion_order>())) {
         // ignore validation errors in resurrected transactions
         CValidationState stateDummy;
         if (!fAddToMempool || tx->IsCoinBase() ||
             !AcceptToMemoryPool(config, g_mempool, stateDummy, tx,
                                 nullptr /* pfMissingInputs */,
                                 true /* bypass_limits */,
                                 Amount::zero() /* nAbsurdFee */)) {
             // If the transaction doesn't make it in to the mempool, remove any
             // transactions that depend on it (which would now be orphans).
             g_mempool.removeRecursive(*tx, MemPoolRemovalReason::REORG);
         } else if (g_mempool.exists(tx->GetId())) {
             txidsUpdate.push_back(tx->GetId());
         }
     }
 
     queuedTx.clear();
 
     // AcceptToMemoryPool/addUnchecked all assume that new mempool entries have
     // no in-mempool children, which is generally not true when adding
     // previously-confirmed transactions back to the mempool.
     // UpdateTransactionsFromBlock finds descendants of any transactions in the
     // disconnectpool that were added back and cleans up the mempool state.
     g_mempool.UpdateTransactionsFromBlock(txidsUpdate);
 
     // We also need to remove any now-immature transactions
     g_mempool.removeForReorg(config, pcoinsTip.get(),
                              ::ChainActive().Tip()->nHeight + 1,
                              STANDARD_LOCKTIME_VERIFY_FLAGS);
 
     // Re-limit mempool size, in case we added any transactions
     g_mempool.LimitSize(
         gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
         gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60);
 }
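The final LimitSize call converts user-facing units: -maxmempool is given in megabytes and -mempoolexpiry in hours, while LimitSize takes bytes and seconds. A small sketch of that conversion; the default values below are assumptions for illustration, not read from this hunk:

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Assumed defaults for illustration; the real values come from
        // DEFAULT_MAX_MEMPOOL_SIZE (MB) and DEFAULT_MEMPOOL_EXPIRY (hours).
        const int64_t maxMempoolMB = 300;
        const int64_t mempoolExpiryHours = 336; // two weeks

        const int64_t sizeLimitBytes = maxMempoolMB * 1000000;      // MB -> bytes
        const int64_t expirySeconds = mempoolExpiryHours * 60 * 60; // h  -> s

        std::printf("LimitSize(%lld bytes, %lld seconds)\n",
                    (long long)sizeLimitBytes, (long long)expirySeconds);
    }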
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index 74ef1ac6a..ff1ce00a0 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -1,223 +1,201 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #include <validationinterface.h>
 
+#include <primitives/block.h>
 #include <scheduler.h>
-#include <txmempool.h>
 
 #include <atomic>
 #include <future>
 #include <list>
 #include <tuple>
+#include <unordered_map>
 #include <utility>
 
 #include <boost/signals2/signal.hpp>
 
 struct ValidationInterfaceConnections {
     boost::signals2::scoped_connection UpdatedBlockTip;
     boost::signals2::scoped_connection TransactionAddedToMempool;
     boost::signals2::scoped_connection BlockConnected;
     boost::signals2::scoped_connection BlockDisconnected;
     boost::signals2::scoped_connection TransactionRemovedFromMempool;
     boost::signals2::scoped_connection ChainStateFlushed;
     boost::signals2::scoped_connection BlockChecked;
     boost::signals2::scoped_connection NewPoWValidBlock;
 };
 
 struct MainSignalsInstance {
     boost::signals2::signal<void(const CBlockIndex *, const CBlockIndex *,
                                  bool fInitialDownload)>
         UpdatedBlockTip;
     boost::signals2::signal<void(const CTransactionRef &)>
         TransactionAddedToMempool;
     boost::signals2::signal<void(const std::shared_ptr<const CBlock> &,
                                  const CBlockIndex *pindex,
                                  const std::vector<CTransactionRef> &)>
         BlockConnected;
     boost::signals2::signal<void(const std::shared_ptr<const CBlock> &)>
         BlockDisconnected;
     boost::signals2::signal<void(const CTransactionRef &)>
         TransactionRemovedFromMempool;
     boost::signals2::signal<void(const CBlockLocator &)> ChainStateFlushed;
     boost::signals2::signal<void(const CBlock &, const CValidationState &)>
         BlockChecked;
     boost::signals2::signal<void(const CBlockIndex *,
                                  const std::shared_ptr<const CBlock> &)>
         NewPoWValidBlock;
 
     // We are not allowed to assume the scheduler only runs in one thread,
     // but must ensure all callbacks happen in-order, so we end up creating
     // our own queue here :(
     SingleThreadedSchedulerClient m_schedulerClient;
     std::unordered_map<CValidationInterface *, ValidationInterfaceConnections>
         m_connMainSignals;
 
     explicit MainSignalsInstance(CScheduler *pscheduler)
         : m_schedulerClient(pscheduler) {}
 };
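The queue mentioned in the comment above is why every notification below goes through AddToProcessQueue instead of firing the boost signal directly: callbacks must reach subscribers in the order they were generated even if the scheduler runs several service threads. A minimal, standard-library-only sketch of that pattern (not the actual CScheduler/SingleThreadedSchedulerClient API):

    #include <cstdio>
    #include <functional>
    #include <mutex>
    #include <queue>

    // Minimal stand-in for the "own queue" idea: tasks may be enqueued from
    // any thread, but are drained strictly in FIFO order, one at a time.
    class OrderedCallbackQueue {
        std::mutex m;
        std::queue<std::function<void()>> q;

    public:
        void AddToProcessQueue(std::function<void()> f) {
            std::lock_guard<std::mutex> lock(m);
            q.push(std::move(f));
        }
        // Assumed to be called by at most one drain loop at a time, e.g. from
        // a scheduler service thread.
        void Drain() {
            for (;;) {
                std::function<void()> f;
                {
                    std::lock_guard<std::mutex> lock(m);
                    if (q.empty()) {
                        return;
                    }
                    f = std::move(q.front());
                    q.pop();
                }
                f(); // runs outside the lock, but always in submission order
            }
        }
    };

    int main() {
        OrderedCallbackQueue queue;
        queue.AddToProcessQueue([] { std::printf("TransactionAddedToMempool\n"); });
        queue.AddToProcessQueue([] { std::printf("UpdatedBlockTip\n"); });
        queue.Drain(); // prints the two notifications in the order queued
    }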
 
 static CMainSignals g_signals;
 
-// This map has to be a separate global instead of a member of
-// MainSignalsInstance, because RegisterWithMempoolSignals is currently called
-// before RegisterBackgroundSignalScheduler, so MainSignalsInstance hasn't been
-// created yet.
-static std::unordered_map<CTxMemPool *, boost::signals2::scoped_connection>
-    g_connNotifyEntryRemoved;
-
 void CMainSignals::RegisterBackgroundSignalScheduler(CScheduler &scheduler) {
     assert(!m_internals);
     m_internals.reset(new MainSignalsInstance(&scheduler));
 }
 
 void CMainSignals::UnregisterBackgroundSignalScheduler() {
     m_internals.reset(nullptr);
 }
 
 void CMainSignals::FlushBackgroundCallbacks() {
     if (m_internals) {
         m_internals->m_schedulerClient.EmptyQueue();
     }
 }
 
 size_t CMainSignals::CallbacksPending() {
     if (!m_internals) {
         return 0;
     }
     return m_internals->m_schedulerClient.CallbacksPending();
 }
 
-void CMainSignals::RegisterWithMempoolSignals(CTxMemPool &pool) {
-    g_connNotifyEntryRemoved.emplace(
-        std::piecewise_construct, std::forward_as_tuple(&pool),
-        std::forward_as_tuple(pool.NotifyEntryRemoved.connect(
-            std::bind(&CMainSignals::MempoolEntryRemoved, this,
-                      std::placeholders::_1, std::placeholders::_2))));
-}
-
-void CMainSignals::UnregisterWithMempoolSignals(CTxMemPool &pool) {
-    g_connNotifyEntryRemoved.erase(&pool);
-}
-
 CMainSignals &GetMainSignals() {
     return g_signals;
 }
 
 void RegisterValidationInterface(CValidationInterface *pwalletIn) {
     ValidationInterfaceConnections &conns =
         g_signals.m_internals->m_connMainSignals[pwalletIn];
     conns.UpdatedBlockTip = g_signals.m_internals->UpdatedBlockTip.connect(
         std::bind(&CValidationInterface::UpdatedBlockTip, pwalletIn,
                   std::placeholders::_1, std::placeholders::_2,
                   std::placeholders::_3));
     conns.TransactionAddedToMempool =
         g_signals.m_internals->TransactionAddedToMempool.connect(
             std::bind(&CValidationInterface::TransactionAddedToMempool,
                       pwalletIn, std::placeholders::_1));
     conns.BlockConnected = g_signals.m_internals->BlockConnected.connect(
         std::bind(&CValidationInterface::BlockConnected, pwalletIn,
                   std::placeholders::_1, std::placeholders::_2,
                   std::placeholders::_3));
     conns.BlockDisconnected = g_signals.m_internals->BlockDisconnected.connect(
         std::bind(&CValidationInterface::BlockDisconnected, pwalletIn,
                   std::placeholders::_1));
     conns.TransactionRemovedFromMempool =
         g_signals.m_internals->TransactionRemovedFromMempool.connect(
             std::bind(&CValidationInterface::TransactionRemovedFromMempool,
                       pwalletIn, std::placeholders::_1));
     conns.ChainStateFlushed = g_signals.m_internals->ChainStateFlushed.connect(
         std::bind(&CValidationInterface::ChainStateFlushed, pwalletIn,
                   std::placeholders::_1));
     conns.BlockChecked = g_signals.m_internals->BlockChecked.connect(
         std::bind(&CValidationInterface::BlockChecked, pwalletIn,
                   std::placeholders::_1, std::placeholders::_2));
     conns.NewPoWValidBlock = g_signals.m_internals->NewPoWValidBlock.connect(
         std::bind(&CValidationInterface::NewPoWValidBlock, pwalletIn,
                   std::placeholders::_1, std::placeholders::_2));
 }
 
 void UnregisterValidationInterface(CValidationInterface *pwalletIn) {
     if (g_signals.m_internals) {
         g_signals.m_internals->m_connMainSignals.erase(pwalletIn);
     }
 }
 
 void UnregisterAllValidationInterfaces() {
     if (!g_signals.m_internals) {
         return;
     }
     g_signals.m_internals->m_connMainSignals.clear();
 }
 
 void CallFunctionInValidationInterfaceQueue(std::function<void()> func) {
     g_signals.m_internals->m_schedulerClient.AddToProcessQueue(std::move(func));
 }
 
 void SyncWithValidationInterfaceQueue() {
     AssertLockNotHeld(cs_main);
     // Block until the validation queue drains
     std::promise<void> promise;
     CallFunctionInValidationInterfaceQueue([&promise] { promise.set_value(); });
     promise.get_future().wait();
 }
 
-void CMainSignals::MempoolEntryRemoved(CTransactionRef ptx,
-                                       MemPoolRemovalReason reason) {
-    if (reason != MemPoolRemovalReason::BLOCK &&
-        reason != MemPoolRemovalReason::CONFLICT) {
-        m_internals->m_schedulerClient.AddToProcessQueue(
-            [ptx, this] { m_internals->TransactionRemovedFromMempool(ptx); });
-    }
-}
-
 void CMainSignals::UpdatedBlockTip(const CBlockIndex *pindexNew,
                                    const CBlockIndex *pindexFork,
                                    bool fInitialDownload) {
     // Dependencies exist that require UpdatedBlockTip events to be delivered in
     // the order in which the chain actually updates. One way to ensure this is
     // for the caller to invoke this signal in the same critical section where
     // the chain is updated
 
     m_internals->m_schedulerClient.AddToProcessQueue([pindexNew, pindexFork,
                                                       fInitialDownload, this] {
         m_internals->UpdatedBlockTip(pindexNew, pindexFork, fInitialDownload);
     });
 }
 
 void CMainSignals::TransactionAddedToMempool(const CTransactionRef &ptx) {
     m_internals->m_schedulerClient.AddToProcessQueue(
         [ptx, this] { m_internals->TransactionAddedToMempool(ptx); });
 }
 
+void CMainSignals::TransactionRemovedFromMempool(const CTransactionRef &ptx) {
+    m_internals->m_schedulerClient.AddToProcessQueue(
+        [ptx, this] { m_internals->TransactionRemovedFromMempool(ptx); });
+}
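With the NotifyEntryRemoved signal and its BLOCK/CONFLICT filter deleted above, removal notifications are now expected to be pushed by the mempool calling this new entry point directly. The call site is not part of this hunk, so the following is only a hedged sketch of what such a caller might look like; the helper name is invented here, and the reason filter mirrors the one that was removed.

    #include <txmempool.h>           // MemPoolRemovalReason
    #include <validationinterface.h> // GetMainSignals()

    // Hedged sketch only -- not the call site added by this patch. A mempool
    // removal path could notify listeners for every reason except BLOCK and
    // CONFLICT, matching the filter in the deleted MempoolEntryRemoved().
    static void NotifyRemovedFromMempool(const CTransactionRef &ptx,
                                         MemPoolRemovalReason reason) {
        if (reason != MemPoolRemovalReason::BLOCK &&
            reason != MemPoolRemovalReason::CONFLICT) {
            GetMainSignals().TransactionRemovedFromMempool(ptx);
        }
    }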
+
 void CMainSignals::BlockConnected(
     const std::shared_ptr<const CBlock> &pblock, const CBlockIndex *pindex,
     const std::shared_ptr<const std::vector<CTransactionRef>> &pvtxConflicted) {
     m_internals->m_schedulerClient.AddToProcessQueue(
         [pblock, pindex, pvtxConflicted, this] {
             m_internals->BlockConnected(pblock, pindex, *pvtxConflicted);
         });
 }
 
 void CMainSignals::BlockDisconnected(
     const std::shared_ptr<const CBlock> &pblock) {
     m_internals->m_schedulerClient.AddToProcessQueue(
         [pblock, this] { m_internals->BlockDisconnected(pblock); });
 }
 
 void CMainSignals::ChainStateFlushed(const CBlockLocator &locator) {
     m_internals->m_schedulerClient.AddToProcessQueue(
         [locator, this] { m_internals->ChainStateFlushed(locator); });
 }
 
 void CMainSignals::BlockChecked(const CBlock &block,
                                 const CValidationState &state) {
     m_internals->BlockChecked(block, state);
 }
 
 void CMainSignals::NewPoWValidBlock(
     const CBlockIndex *pindex, const std::shared_ptr<const CBlock> &block) {
     m_internals->NewPoWValidBlock(pindex, block);
 }
diff --git a/src/validationinterface.h b/src/validationinterface.h
index 0c9fed6fb..73b58eaa4 100644
--- a/src/validationinterface.h
+++ b/src/validationinterface.h
@@ -1,213 +1,205 @@
 // Copyright (c) 2009-2010 Satoshi Nakamoto
 // Copyright (c) 2009-2016 The Bitcoin Core developers
 // Distributed under the MIT software license, see the accompanying
 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
 
 #ifndef BITCOIN_VALIDATIONINTERFACE_H
 #define BITCOIN_VALIDATIONINTERFACE_H
 
 #include <primitives/transaction.h> // CTransaction(Ref)
 #include <sync.h>
 
 #include <functional>
 #include <memory>
 
 extern RecursiveMutex cs_main;
 class CBlock;
 class CBlockIndex;
 struct CBlockLocator;
 class CConnman;
 class CReserveScript;
 class CValidationInterface;
 class CValidationState;
 class uint256;
 class CScheduler;
-class CTxMemPool;
-enum class MemPoolRemovalReason;
 
 // These functions dispatch to one or all registered wallets
 
 /** Register a wallet to receive updates from core */
 void RegisterValidationInterface(CValidationInterface *pwalletIn);
 /** Unregister a wallet from core */
 void UnregisterValidationInterface(CValidationInterface *pwalletIn);
 /** Unregister all wallets from core */
 void UnregisterAllValidationInterfaces();
 /**
  * Pushes a function to callback onto the notification queue, guaranteeing any
  * callbacks generated prior to now are finished when the function is called.
  *
  * Be very careful blocking on func to be called if any locks are held -
  * validation interface clients may not be able to make progress as they often
  * wait for things like cs_main, so blocking until func is called with cs_main
  * will result in a deadlock (that DEBUG_LOCKORDER will miss).
  */
 void CallFunctionInValidationInterfaceQueue(std::function<void()> func);
 /**
  * This is a synonym for the following, which asserts certain locks are not
  * held:
  *     std::promise<void> promise;
  *     CallFunctionInValidationInterfaceQueue([&promise] {
  *         promise.set_value();
  *     });
  *     promise.get_future().wait();
  */
 void SyncWithValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main);
 
 /**
  * Implement this to subscribe to events generated in validation
  *
  * Each CValidationInterface() subscriber will receive event callbacks
  * in the order in which the events were generated by validation.
  * Furthermore, each ValidationInterface() subscriber may assume that
  * callbacks effectively run in a single thread with single-threaded
  * memory consistency. That is, for a given ValidationInterface()
  * instantiation, each callback will complete before the next one is
  * invoked. This means, for example when a block is connected that the
  * UpdatedBlockTip() callback may depend on an operation performed in
  * the BlockConnected() callback without worrying about explicit
  * synchronization. No ordering should be assumed across
  * ValidationInterface() subscribers.
  */
 class CValidationInterface {
 protected:
     /**
      * Protected destructor so that instances can only be deleted by derived
      * classes. If that restriction is no longer desired, this should be made
      * public and virtual.
      */
     ~CValidationInterface() = default;
     /**
      * Notifies listeners when the block chain tip advances.
      *
      * When multiple blocks are connected at once, UpdatedBlockTip will be
      * called on the final tip but may not be called on every intermediate tip.
      * If the latter behavior is desired, subscribe to BlockConnected() instead.
      *
      * Called on a background thread.
      */
     virtual void UpdatedBlockTip(const CBlockIndex *pindexNew,
                                  const CBlockIndex *pindexFork,
                                  bool fInitialDownload) {}
     /**
      * Notifies listeners of a transaction having been added to mempool.
      *
      * Called on a background thread.
      */
     virtual void TransactionAddedToMempool(const CTransactionRef &ptxn) {}
     /**
      * Notifies listeners of a transaction leaving mempool.
      *
      * This only fires for transactions which leave the mempool because of
      * expiry, size limiting, reorg (changes in lock times/coinbase maturity),
      * or replacement. It does not include transactions which are included in
      * a block (those are reported via BlockConnected, either in block->vtx or
      * in txnConflicted).
      *
      * Called on a background thread.
      */
     virtual void TransactionRemovedFromMempool(const CTransactionRef &ptx) {}
     /**
      * Notifies listeners of a block being connected.
      * Provides a vector of transactions evicted from the mempool as a result.
      *
      * Called on a background thread.
      */
     virtual void
     BlockConnected(const std::shared_ptr<const CBlock> &block,
                    const CBlockIndex *pindex,
                    const std::vector<CTransactionRef> &txnConflicted) {}
     /**
      * Notifies listeners of a block being disconnected
      *
      * Called on a background thread.
      */
     virtual void BlockDisconnected(const std::shared_ptr<const CBlock> &block) {
     }
     /**
      * Notifies listeners of the new active block chain on-disk.
      *
      * Prior to this callback, any updates are not guaranteed to persist on disk
      * (ie clients need to handle shutdown/restart safety by being able to
      * understand when some updates were lost due to unclean shutdown).
      *
      * When this callback is invoked, the validation changes done by any prior
      * callback are guaranteed to exist on disk and survive a restart, including
      * an unclean shutdown.
      *
      * Provides a locator describing the best chain, which is likely useful for
      * storing current state on disk in client DBs.
      *
      * Called on a background thread.
      */
     virtual void ChainStateFlushed(const CBlockLocator &locator) {}
     /**
      * Notifies listeners of a block validation result.
      * If the provided CValidationState IsValid, the provided block
      * is guaranteed to be the current best block at the time the
      * callback was generated (not necessarily now)
      */
     virtual void BlockChecked(const CBlock &, const CValidationState &) {}
     /**
      * Notifies listeners that a block which builds directly on our current tip
      * has been received and connected to the headers tree, though not validated
      * yet.
      */
     virtual void NewPoWValidBlock(const CBlockIndex *pindex,
                                   const std::shared_ptr<const CBlock> &block){};
     friend void ::RegisterValidationInterface(CValidationInterface *);
     friend void ::UnregisterValidationInterface(CValidationInterface *);
     friend void ::UnregisterAllValidationInterfaces();
 };
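A hedged example of a minimal subscriber to the interface above: it overrides only the callback this patch reroutes and leans on the documented single-threaded ordering guarantee, so the handler needs no extra locking; the logging line is illustrative.

    #include <primitives/transaction.h>
    #include <validationinterface.h>

    #include <cstdio>

    // Minimal listener: counts transactions that drop out of the mempool for
    // reasons other than inclusion in a block (expiry, size limiting, reorg, ...).
    class MempoolDropCounter final : public CValidationInterface {
    public:
        int dropped = 0;

    protected:
        void TransactionRemovedFromMempool(const CTransactionRef &ptx) override {
            // Callbacks for one subscriber never overlap, so a plain int is fine.
            ++dropped;
            std::printf("tx %s left the mempool\n",
                        ptx->GetId().ToString().c_str());
        }
    };

    // Usage sketch -- keep the object alive for as long as it is registered:
    //   MempoolDropCounter counter;
    //   RegisterValidationInterface(&counter);
    //   ...
    //   UnregisterValidationInterface(&counter);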
 
 struct MainSignalsInstance;
 class CMainSignals {
 private:
     std::unique_ptr<MainSignalsInstance> m_internals;
 
     friend void ::RegisterValidationInterface(CValidationInterface *);
     friend void ::UnregisterValidationInterface(CValidationInterface *);
     friend void ::UnregisterAllValidationInterfaces();
     friend void ::CallFunctionInValidationInterfaceQueue(
         std::function<void()> func);
 
-    void MempoolEntryRemoved(CTransactionRef tx, MemPoolRemovalReason reason);
-
 public:
     /**
      * Register a CScheduler to give callbacks which should run in the
      * background (may only be called once)
      */
     void RegisterBackgroundSignalScheduler(CScheduler &scheduler);
     /**
      * Unregister a CScheduler to give callbacks which should run in the
      * background - these callbacks will now be dropped!
      */
     void UnregisterBackgroundSignalScheduler();
     /** Call any remaining callbacks on the calling thread */
     void FlushBackgroundCallbacks();
 
     size_t CallbacksPending();
 
-    /** Register with mempool to call TransactionRemovedFromMempool callbacks */
-    void RegisterWithMempoolSignals(CTxMemPool &pool);
-    /** Unregister with mempool */
-    void UnregisterWithMempoolSignals(CTxMemPool &pool);
-
     void UpdatedBlockTip(const CBlockIndex *, const CBlockIndex *,
                          bool fInitialDownload);
     void TransactionAddedToMempool(const CTransactionRef &);
+    void TransactionRemovedFromMempool(const CTransactionRef &);
     void
     BlockConnected(const std::shared_ptr<const CBlock> &,
                    const CBlockIndex *pindex,
                    const std::shared_ptr<const std::vector<CTransactionRef>> &);
     void BlockDisconnected(const std::shared_ptr<const CBlock> &);
     void ChainStateFlushed(const CBlockLocator &);
     void BlockChecked(const CBlock &, const CValidationState &);
     void NewPoWValidBlock(const CBlockIndex *,
                           const std::shared_ptr<const CBlock> &);
 };
 
 CMainSignals &GetMainSignals();
 
 #endif // BITCOIN_VALIDATIONINTERFACE_H
diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh
index 38e846249..f498ff216 100755
--- a/test/lint/lint-circular-dependencies.sh
+++ b/test/lint/lint-circular-dependencies.sh
@@ -1,77 +1,76 @@
 #!/usr/bin/env bash
 #
 # Copyright (c) 2018 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
 #
 # Check for circular dependencies
 
 export LC_ALL=C
 
 EXPECTED_CIRCULAR_DEPENDENCIES=(
     "index/txindex -> validation -> index/txindex"
     "qt/addresstablemodel -> qt/walletmodel -> qt/addresstablemodel"
     "qt/bantablemodel -> qt/clientmodel -> qt/bantablemodel"
     "qt/bitcoingui -> qt/utilitydialog -> qt/bitcoingui"
     "qt/bitcoingui -> qt/walletframe -> qt/bitcoingui"
     "qt/bitcoingui -> qt/walletview -> qt/bitcoingui"
     "qt/clientmodel -> qt/peertablemodel -> qt/clientmodel"
     "qt/paymentserver -> qt/walletmodel -> qt/paymentserver"
     "qt/recentrequeststablemodel -> qt/walletmodel -> qt/recentrequeststablemodel"
     "qt/transactiontablemodel -> qt/walletmodel -> qt/transactiontablemodel"
     "qt/walletmodel -> qt/walletmodeltransaction -> qt/walletmodel"
     "txmempool -> validation -> txmempool"
     "wallet/coincontrol -> wallet/wallet -> wallet/coincontrol"
     "wallet/fees -> wallet/wallet -> wallet/fees"
     "wallet/rpcwallet -> wallet/wallet -> wallet/rpcwallet"
     "wallet/wallet -> wallet/walletdb -> wallet/wallet"
     "wallet/ismine -> wallet/wallet -> wallet/ismine"
-    "txmempool -> validation -> validationinterface -> txmempool"
     "avalanche -> validation -> avalanche"
     "chainparams -> protocol -> chainparams"
     "chainparamsbase -> util/system -> chainparamsbase"
     "minerfund -> validation -> minerfund"
     "script/scriptcache -> validation -> script/scriptcache"
     "seeder/bitcoin -> seeder/db -> seeder/bitcoin"
     "chainparams -> protocol -> config -> chainparams"
 )
 
 EXIT_CODE=0
 
 CIRCULAR_DEPENDENCIES=()
 
 IFS=$'\n'
 for CIRC in $(cd src && ../contrib/devtools/circular-dependencies.py {*,*/*,*/*/*}.{h,cpp} | sed -e 's/^Circular dependency: //'); do
     CIRCULAR_DEPENDENCIES+=("$CIRC")
     IS_EXPECTED_CIRC=0
     for EXPECTED_CIRC in "${EXPECTED_CIRCULAR_DEPENDENCIES[@]}"; do
         if [[ "${CIRC}" == "${EXPECTED_CIRC}" ]]; then
             IS_EXPECTED_CIRC=1
             break
         fi
     done
     if [[ ${IS_EXPECTED_CIRC} == 0 ]]; then
         echo "A new circular dependency in the form of \"${CIRC}\" appears to have been introduced."
         echo
         EXIT_CODE=1
     fi
 done
 
 for EXPECTED_CIRC in "${EXPECTED_CIRCULAR_DEPENDENCIES[@]}"; do
     IS_PRESENT_EXPECTED_CIRC=0
     for CIRC in "${CIRCULAR_DEPENDENCIES[@]}"; do
         if [[ "${CIRC}" == "${EXPECTED_CIRC}" ]]; then
             IS_PRESENT_EXPECTED_CIRC=1
             break
         fi
     done
     if [[ ${IS_PRESENT_EXPECTED_CIRC} == 0 ]]; then
         echo "Good job! The circular dependency \"${EXPECTED_CIRC}\" is no longer present."
         echo "Please remove it from EXPECTED_CIRCULAR_DEPENDENCIES in $0"
         echo "to make sure this circular dependency is not accidentally reintroduced."
         echo
         EXIT_CODE=1
     fi
 done
 
 exit ${EXIT_CODE}