diff --git a/src/init.cpp b/src/init.cpp index 511753950..e0a872f1f 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1,2265 +1,2265 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "init.h" #include "addrman.h" #include "amount.h" #include "chain.h" #include "chainparams.h" #include "checkpoints.h" #include "compat/sanity.h" #include "config.h" #include "consensus/validation.h" #include "fs.h" #include "httprpc.h" #include "httpserver.h" #include "key.h" #include "miner.h" #include "net.h" #include "net_processing.h" #include "netbase.h" #include "policy/policy.h" #include "rpc/register.h" #include "rpc/server.h" #include "scheduler.h" #include "script/scriptcache.h" #include "script/sigcache.h" #include "script/standard.h" #include "timedata.h" #include "torcontrol.h" #include "txdb.h" #include "txmempool.h" #include "ui_interface.h" #include "util.h" #include "utilmoneystr.h" #include "validation.h" #include "validationinterface.h" #ifdef ENABLE_WALLET #include "wallet/rpcdump.h" #include "wallet/wallet.h" #endif #include "warnings.h" #include #include #include #ifndef WIN32 #include #endif #include #include #include #include #include #include #include #if ENABLE_ZMQ #include "zmq/zmqnotificationinterface.h" #endif bool fFeeEstimatesInitialized = false; static const bool DEFAULT_PROXYRANDOMIZE = true; static const bool DEFAULT_REST_ENABLE = false; static const bool DEFAULT_DISABLE_SAFEMODE = false; static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false; std::unique_ptr g_connman; std::unique_ptr peerLogic; #if ENABLE_ZMQ static CZMQNotificationInterface *pzmqNotificationInterface = nullptr; #endif #ifdef WIN32 // Win32 LevelDB doesn't use filedescriptors, and the ones used for accessing // block files don't count towards the fd_set size limit anyway. #define MIN_CORE_FILEDESCRIPTORS 0 #else #define MIN_CORE_FILEDESCRIPTORS 150 #endif /** Used to pass flags to the Bind() function */ enum BindFlags { BF_NONE = 0, BF_EXPLICIT = (1U << 0), BF_REPORT_ERROR = (1U << 1), BF_WHITELIST = (1U << 2), }; static const char *FEE_ESTIMATES_FILENAME = "fee_estimates.dat"; ////////////////////////////////////////////////////////////////////////////// // // Shutdown // // // Thread management and startup/shutdown: // // The network-processing threads are all part of a thread group created by // AppInit() or the Qt main() function. // // A clean exit happens when StartShutdown() or the SIGTERM signal handler sets // fRequestShutdown, which triggers the DetectShutdownThread(), which interrupts // the main thread group. DetectShutdownThread() then exits, which causes // AppInit() to continue (it .joins the shutdown thread). Shutdown() is then // called to clean up database connections, and stop other threads that should // only be stopped after the main network-processing threads have exited. // // Note that if running -daemon the parent process returns from AppInit2 before // adding any threads to the threadGroup, so .join_all() returns immediately and // the parent exits from main(). // // Shutdown for Qt is very similar, only it uses a QTimer to detect // fRequestShutdown getting set, and then does the normal Qt shutdown thing. 
// std::atomic fRequestShutdown(false); std::atomic fDumpMempoolLater(false); void StartShutdown() { fRequestShutdown = true; } bool ShutdownRequested() { return fRequestShutdown; } /** * This is a minimally invasive approach to shutdown on LevelDB read errors from * the chainstate, while keeping user interface out of the common library, which * is shared between bitcoind, and bitcoin-qt and non-server tools. */ class CCoinsViewErrorCatcher final : public CCoinsViewBacked { public: CCoinsViewErrorCatcher(CCoinsView *view) : CCoinsViewBacked(view) {} bool GetCoin(const COutPoint &outpoint, Coin &coin) const override { try { return CCoinsViewBacked::GetCoin(outpoint, coin); } catch (const std::runtime_error &e) { uiInterface.ThreadSafeMessageBox( _("Error reading from database, shutting down."), "", CClientUIInterface::MSG_ERROR); LogPrintf("Error reading from database: %s\n", e.what()); // Starting the shutdown sequence and returning false to the caller // would be interpreted as 'entry not found' (as opposed to unable // to read data), and could lead to invalid interpretation. Just // exit immediately, as we can't continue anyway, and all writes // should be atomic. abort(); } } // Writes do not need similar protection, as failure to write is handled by // the caller. }; static CCoinsViewDB *pcoinsdbview = nullptr; static CCoinsViewErrorCatcher *pcoinscatcher = nullptr; static std::unique_ptr globalVerifyHandle; void Interrupt(boost::thread_group &threadGroup) { InterruptHTTPServer(); InterruptHTTPRPC(); InterruptRPC(); InterruptREST(); InterruptTorControl(); if (g_connman) g_connman->Interrupt(); threadGroup.interrupt_all(); } void Shutdown() { LogPrintf("%s: In progress...\n", __func__); static CCriticalSection cs_Shutdown; TRY_LOCK(cs_Shutdown, lockShutdown); if (!lockShutdown) return; /// Note: Shutdown() must be able to handle cases in which AppInit2() failed /// part of the way, for example if the data directory was found to be /// locked. Be sure that anything that writes files or flushes caches only /// does this if the respective module was initialized. 
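// Teardown below runs roughly in reverse order of initialization: stop the
// RPC/HTTP servers first, then the network layer (peerLogic, g_connman),
// then flush and delete the chainstate/coins views, flush the wallets, and
// finally stop ECC.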
RenameThread("bitcoin-shutoff"); mempool.AddTransactionsUpdated(1); StopHTTPRPC(); StopREST(); StopRPC(); StopHTTPServer(); #ifdef ENABLE_WALLET for (CWalletRef pwallet : vpwallets) { pwallet->Flush(false); } #endif MapPort(false); UnregisterValidationInterface(peerLogic.get()); peerLogic.reset(); g_connman.reset(); StopTorControl(); UnregisterNodeSignals(GetNodeSignals()); if (fDumpMempoolLater && gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { DumpMempool(); } if (fFeeEstimatesInitialized) { fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME; CAutoFile est_fileout(fsbridge::fopen(est_path, "wb"), SER_DISK, CLIENT_VERSION); if (!est_fileout.IsNull()) mempool.WriteFeeEstimates(est_fileout); else LogPrintf("%s: Failed to write fee estimates to %s\n", __func__, est_path.string()); fFeeEstimatesInitialized = false; } { LOCK(cs_main); if (pcoinsTip != nullptr) { FlushStateToDisk(); } delete pcoinsTip; pcoinsTip = nullptr; delete pcoinscatcher; pcoinscatcher = nullptr; delete pcoinsdbview; pcoinsdbview = nullptr; delete pblocktree; pblocktree = nullptr; } #ifdef ENABLE_WALLET for (CWalletRef pwallet : vpwallets) { pwallet->Flush(true); } #endif #if ENABLE_ZMQ if (pzmqNotificationInterface) { UnregisterValidationInterface(pzmqNotificationInterface); delete pzmqNotificationInterface; pzmqNotificationInterface = nullptr; } #endif #ifndef WIN32 try { fs::remove(GetPidFile()); } catch (const fs::filesystem_error &e) { LogPrintf("%s: Unable to remove pidfile: %s\n", __func__, e.what()); } #endif UnregisterAllValidationInterfaces(); #ifdef ENABLE_WALLET for (CWalletRef pwallet : vpwallets) { delete pwallet; } vpwallets.clear(); #endif globalVerifyHandle.reset(); ECC_Stop(); LogPrintf("%s: done\n", __func__); } /** * Signal handlers are very limited in what they are allowed to do, so: */ void HandleSIGTERM(int) { fRequestShutdown = true; } void HandleSIGHUP(int) { fReopenDebugLog = true; } static bool Bind(CConnman &connman, const CService &addr, unsigned int flags) { if (!(flags & BF_EXPLICIT) && IsLimited(addr)) return false; std::string strError; if (!connman.BindListenPort(addr, strError, (flags & BF_WHITELIST) != 0)) { if (flags & BF_REPORT_ERROR) return InitError(strError); return false; } return true; } void OnRPCStarted() { uiInterface.NotifyBlockTip.connect(&RPCNotifyBlockChange); } void OnRPCStopped() { uiInterface.NotifyBlockTip.disconnect(&RPCNotifyBlockChange); RPCNotifyBlockChange(false, nullptr); cvBlockChange.notify_all(); LogPrint(BCLog::RPC, "RPC stopped.\n"); } void OnRPCPreCommand(const CRPCCommand &cmd) { // Observe safe mode. std::string strWarning = GetWarnings("rpc"); if (strWarning != "" && !gArgs.GetBoolArg("-disablesafemode", DEFAULT_DISABLE_SAFEMODE) && !cmd.okSafeMode) throw JSONRPCError(RPC_FORBIDDEN_BY_SAFE_MODE, std::string("Safe mode: ") + strWarning); } std::string HelpMessage(HelpMessageMode mode) { const bool showDebug = gArgs.GetBoolArg("-help-debug", false); // When adding new options to the categories, please keep and ensure // alphabetical ordering. Do not translate _(...) -help-debug options, Many // technical terms, and only a very small audience, so is unnecessary stress // to translators. 
std::string strUsage = HelpMessageGroup(_("Options:")); strUsage += HelpMessageOpt("-?", _("Print this help message and exit")); strUsage += HelpMessageOpt("-version", _("Print version and exit")); strUsage += HelpMessageOpt( "-alertnotify=", _("Execute command when a relevant alert is received or we see a " "really long fork (%s in cmd is replaced by message)")); strUsage += HelpMessageOpt("-blocknotify=", _("Execute command when the best block changes " "(%s in cmd is replaced by block hash)")); if (showDebug) strUsage += HelpMessageOpt( "-blocksonly", strprintf( _("Whether to operate in a blocks only mode (default: %d)"), DEFAULT_BLOCKSONLY)); strUsage += HelpMessageOpt( "-assumevalid=", strprintf(_("If this block is in the chain assume that it and its " "ancestors are valid and potentially skip their script " "verification (0 to verify all, default: %s, testnet: %s)"), Params(CBaseChainParams::MAIN) .GetConsensus() .defaultAssumeValid.GetHex(), Params(CBaseChainParams::TESTNET) .GetConsensus() .defaultAssumeValid.GetHex())); strUsage += HelpMessageOpt( "-conf=", strprintf(_("Specify configuration file (default: %s)"), BITCOIN_CONF_FILENAME)); if (mode == HMM_BITCOIND) { #if HAVE_DECL_DAEMON strUsage += HelpMessageOpt( "-daemon", _("Run in the background as a daemon and accept commands")); #endif } strUsage += HelpMessageOpt("-datadir=", _("Specify data directory")); if (showDebug) { strUsage += HelpMessageOpt( "-dbbatchsize", strprintf( "Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize)); } strUsage += HelpMessageOpt( "-dbcache=", strprintf( _("Set database cache size in megabytes (%d to %d, default: %d)"), nMinDbCache, nMaxDbCache, nDefaultDbCache)); if (showDebug) strUsage += HelpMessageOpt( "-feefilter", strprintf("Tell other nodes to filter invs to us by " "our mempool min fee (default: %d)", DEFAULT_FEEFILTER)); strUsage += HelpMessageOpt( "-loadblock=", _("Imports blocks from external blk000??.dat file on startup")); strUsage += HelpMessageOpt( "-maxorphantx=", strprintf(_("Keep at most unconnectable " "transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS)); strUsage += HelpMessageOpt("-maxmempool=", strprintf(_("Keep the transaction memory pool " "below megabytes (default: %u)"), DEFAULT_MAX_MEMPOOL_SIZE)); strUsage += HelpMessageOpt("-mempoolexpiry=", strprintf(_("Do not keep transactions in the mempool " "longer than hours (default: %u)"), DEFAULT_MEMPOOL_EXPIRY)); strUsage += HelpMessageOpt("-persistmempool", strprintf(_("Whether to save the mempool on shutdown " "and load on restart (default: %u)"), DEFAULT_PERSIST_MEMPOOL)); strUsage += HelpMessageOpt( "-blockreconstructionextratxn=", strprintf(_("Extra transactions to keep in memory for compact block " "reconstructions (default: %u)"), DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN)); strUsage += HelpMessageOpt( "-par=", strprintf(_("Set the number of script verification threads (%u to %d, " "0 = auto, <0 = leave that many cores free, default: %d)"), -GetNumCores(), MAX_SCRIPTCHECK_THREADS, DEFAULT_SCRIPTCHECK_THREADS)); #ifndef WIN32 strUsage += HelpMessageOpt( "-pid=", strprintf(_("Specify pid file (default: %s)"), BITCOIN_PID_FILENAME)); #endif strUsage += HelpMessageOpt( "-prune=", strprintf( _("Reduce storage requirements by enabling pruning (deleting) of " "old blocks. This allows the pruneblockchain RPC to be called to " "delete specific blocks, and enables automatic pruning of old " "blocks if a target size in MiB is provided. 
This mode is " "incompatible with -txindex and -rescan. " "Warning: Reverting this setting requires re-downloading the " "entire blockchain. " "(default: 0 = disable pruning blocks, 1 = allow manual pruning " "via RPC, >%u = automatically prune block files to stay under " "the specified target size in MiB)"), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); strUsage += HelpMessageOpt( "-reindex-chainstate", _("Rebuild chain state from the currently indexed blocks")); strUsage += HelpMessageOpt("-reindex", _("Rebuild chain state and block index from " "the blk*.dat files on disk")); #ifndef WIN32 strUsage += HelpMessageOpt( "-sysperms", _("Create new files with system default permissions, instead of umask " "077 (only effective with disabled wallet functionality)")); #endif strUsage += HelpMessageOpt( "-txindex", strprintf(_("Maintain a full transaction index, used by " "the getrawtransaction rpc call (default: %d)"), DEFAULT_TXINDEX)); strUsage += HelpMessageOpt( "-usecashaddr", _("Use Cash Address for destination encoding instead " "of base58 (activate by default on Jan, 14)")); strUsage += HelpMessageGroup(_("Connection options:")); strUsage += HelpMessageOpt( "-addnode=", _("Add a node to connect to and attempt to keep the connection open")); strUsage += HelpMessageOpt( "-banscore=", strprintf( _("Threshold for disconnecting misbehaving peers (default: %u)"), DEFAULT_BANSCORE_THRESHOLD)); strUsage += HelpMessageOpt( "-bantime=", strprintf(_("Number of seconds to keep misbehaving " "peers from reconnecting (default: %u)"), DEFAULT_MISBEHAVING_BANTIME)); strUsage += HelpMessageOpt("-bind=", _("Bind to given address and always listen on " "it. Use [host]:port notation for IPv6")); strUsage += HelpMessageOpt("-connect=", _("Connect only to the specified node(s); -noconnect or " "-connect=0 alone to disable automatic connections")); strUsage += HelpMessageOpt("-discover", _("Discover own IP addresses (default: 1 when " "listening and no -externalip or -proxy)")); strUsage += HelpMessageOpt( "-dns", _("Allow DNS lookups for -addnode, -seednode and -connect") + " " + strprintf(_("(default: %d)"), DEFAULT_NAME_LOOKUP)); strUsage += HelpMessageOpt( "-dnsseed", _("Query for peer addresses via DNS lookup, if low on " "addresses (default: 1 unless -connect/-noconnect)")); strUsage += HelpMessageOpt("-externalip=", _("Specify your own public address")); strUsage += HelpMessageOpt( "-forcednsseed", strprintf( _("Always query for peer addresses via DNS lookup (default: %d)"), DEFAULT_FORCEDNSSEED)); strUsage += HelpMessageOpt("-listen", _("Accept connections from outside (default: " "1 if no -proxy or -connect/-noconnect)")); strUsage += HelpMessageOpt( "-listenonion", strprintf(_("Automatically create Tor hidden service (default: %d)"), DEFAULT_LISTEN_ONION)); strUsage += HelpMessageOpt( "-maxconnections=", strprintf(_("Maintain at most connections to peers (default: %u)"), DEFAULT_MAX_PEER_CONNECTIONS)); strUsage += HelpMessageOpt("-maxreceivebuffer=", strprintf(_("Maximum per-connection receive buffer, " "*1000 bytes (default: %u)"), DEFAULT_MAXRECEIVEBUFFER)); strUsage += HelpMessageOpt( "-maxsendbuffer=", strprintf(_("Maximum per-connection send buffer, " "*1000 bytes (default: %u)"), DEFAULT_MAXSENDBUFFER)); strUsage += HelpMessageOpt( "-maxtimeadjustment", strprintf(_("Maximum allowed median peer time offset adjustment. Local " "perspective of time may be influenced by peers forward or " "backward by this amount. 
(default: %u seconds)"), DEFAULT_MAX_TIME_ADJUSTMENT)); strUsage += HelpMessageOpt("-onion=", strprintf(_("Use separate SOCKS5 proxy to reach peers " "via Tor hidden services (default: %s)"), "-proxy")); strUsage += HelpMessageOpt( "-onlynet=", _("Only connect to nodes in network (ipv4, ipv6 or onion)")); strUsage += HelpMessageOpt("-permitbaremultisig", strprintf(_("Relay non-P2SH multisig (default: %d)"), DEFAULT_PERMIT_BAREMULTISIG)); strUsage += HelpMessageOpt( "-peerbloomfilters", strprintf(_("Support filtering of blocks and transaction with bloom " "filters (default: %d)"), DEFAULT_PEERBLOOMFILTERS)); strUsage += HelpMessageOpt( "-port=", strprintf( _("Listen for connections on (default: %u or testnet: %u)"), Params(CBaseChainParams::MAIN).GetDefaultPort(), Params(CBaseChainParams::TESTNET).GetDefaultPort())); strUsage += HelpMessageOpt("-proxy=", _("Connect through SOCKS5 proxy")); strUsage += HelpMessageOpt( "-proxyrandomize", strprintf(_("Randomize credentials for every proxy connection. This " "enables Tor stream isolation (default: %d)"), DEFAULT_PROXYRANDOMIZE)); strUsage += HelpMessageOpt( "-seednode=", _("Connect to a node to retrieve peer addresses, and disconnect")); strUsage += HelpMessageOpt( "-timeout=", strprintf(_("Specify connection timeout in " "milliseconds (minimum: 1, default: %d)"), DEFAULT_CONNECT_TIMEOUT)); strUsage += HelpMessageOpt("-torcontrol=:", strprintf(_("Tor control port to use if onion " "listening enabled (default: %s)"), DEFAULT_TOR_CONTROL)); strUsage += HelpMessageOpt("-torpassword=", _("Tor control port password (default: empty)")); #ifdef USE_UPNP #if USE_UPNP strUsage += HelpMessageOpt("-upnp", _("Use UPnP to map the listening port " "(default: 1 when listening and no -proxy)")); #else strUsage += HelpMessageOpt( "-upnp", strprintf(_("Use UPnP to map the listening port (default: %u)"), 0)); #endif #endif strUsage += HelpMessageOpt("-whitebind=", _("Bind to given address and whitelist peers connecting " "to it. Use [host]:port notation for IPv6")); strUsage += HelpMessageOpt( "-whitelist=", _("Whitelist peers connecting from the given IP address (e.g. 1.2.3.4) " "or CIDR notated network (e.g. 1.2.3.0/24). Can be specified " "multiple times.") + " " + _("Whitelisted peers cannot be DoS banned and their " "transactions are always relayed, even if they are already " "in the mempool, useful e.g. for a gateway")); strUsage += HelpMessageOpt( "-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted " "peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY)); strUsage += HelpMessageOpt( "-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even " "if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY)); strUsage += HelpMessageOpt( "-maxuploadtarget=", strprintf(_("Tries to keep outbound traffic under the given target (in " "MiB per 24h), 0 = no limit (default: %d)"), DEFAULT_MAX_UPLOAD_TARGET)); #ifdef ENABLE_WALLET strUsage += CWallet::GetWalletHelpString(showDebug); #endif #if ENABLE_ZMQ strUsage += HelpMessageGroup(_("ZeroMQ notification options:")); strUsage += HelpMessageOpt("-zmqpubhashblock=
", _("Enable publish hash block in
")); strUsage += HelpMessageOpt("-zmqpubhashtx=
", _("Enable publish hash transaction in
")); strUsage += HelpMessageOpt("-zmqpubrawblock=
", _("Enable publish raw block in
")); strUsage += HelpMessageOpt("-zmqpubrawtx=
", _("Enable publish raw transaction in
")); #endif strUsage += HelpMessageGroup(_("Debugging/Testing options:")); strUsage += HelpMessageOpt("-uacomment=", _("Append comment to the user agent string")); if (showDebug) { strUsage += HelpMessageOpt( "-checkblocks=", strprintf( _("How many blocks to check at startup (default: %u, 0 = all)"), DEFAULT_CHECKBLOCKS)); strUsage += HelpMessageOpt("-checklevel=", strprintf(_("How thorough the block verification of " "-checkblocks is (0-4, default: %u)"), DEFAULT_CHECKLEVEL)); strUsage += HelpMessageOpt( "-checkblockindex", strprintf( "Do a full consistency check for mapBlockIndex, " "setBlockIndexCandidates, chainActive and mapBlocksUnlinked " "occasionally. Also sets -checkmempool (default: %d)", Params(CBaseChainParams::MAIN).DefaultConsistencyChecks())); strUsage += HelpMessageOpt( "-checkmempool=", strprintf( "Run checks every transactions (default: %d)", Params(CBaseChainParams::MAIN).DefaultConsistencyChecks())); strUsage += HelpMessageOpt( "-checkpoints", strprintf("Disable expensive verification for " "known chain history (default: %d)", DEFAULT_CHECKPOINTS_ENABLED)); strUsage += HelpMessageOpt( "-disablesafemode", strprintf("Disable safemode, override a real " "safe mode event (default: %d)", DEFAULT_DISABLE_SAFEMODE)); strUsage += HelpMessageOpt( "-testsafemode", strprintf("Force safe mode (default: %d)", DEFAULT_TESTSAFEMODE)); strUsage += HelpMessageOpt("-dropmessagestest=", "Randomly drop 1 of every network messages"); strUsage += HelpMessageOpt("-fuzzmessagestest=", "Randomly fuzz 1 of every network messages"); strUsage += HelpMessageOpt( "-stopafterblockimport", strprintf( "Stop running after importing blocks from disk (default: %d)", DEFAULT_STOPAFTERBLOCKIMPORT)); strUsage += HelpMessageOpt( "-stopatheight", strprintf("Stop running after reaching the given " "height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT)); strUsage += HelpMessageOpt( "-limitancestorcount=", strprintf("Do not accept transactions if number of in-mempool " "ancestors is or more (default: %u)", DEFAULT_ANCESTOR_LIMIT)); strUsage += HelpMessageOpt("-limitancestorsize=", strprintf("Do not accept transactions whose size " "with all in-mempool ancestors exceeds " " kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT)); strUsage += HelpMessageOpt( "-limitdescendantcount=", strprintf("Do not accept transactions if any ancestor would have " " or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT)); strUsage += HelpMessageOpt( "-limitdescendantsize=", strprintf("Do not accept transactions if any ancestor would have " "more than kilobytes of in-mempool descendants " "(default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT)); strUsage += HelpMessageOpt("-bip9params=deployment:start:end", "Use given start/end times for specified " "BIP9 deployment (regtest-only)"); } strUsage += HelpMessageOpt( "-debug=", strprintf(_("Output debugging information (default: %u, supplying " " is optional)"), 0) + ". " + _("If is not supplied or if = 1, " "output all debugging information.") + _(" can be:") + " " + ListLogCategories() + "."); strUsage += HelpMessageOpt( "-debugexclude=", strprintf(_("Exclude debugging information for a category. 
Can be used " "in conjunction with -debug=1 to output debug logs for all " "categories except one or more specified categories."))); if (showDebug) { strUsage += HelpMessageOpt( "-nodebug", "Turn off debugging messages, same as -debug=0"); } strUsage += HelpMessageOpt( "-help-debug", _("Show all debugging options (usage: --help -help-debug)")); strUsage += HelpMessageOpt( "-logips", strprintf(_("Include IP addresses in debug output (default: %d)"), DEFAULT_LOGIPS)); strUsage += HelpMessageOpt( "-logtimestamps", strprintf(_("Prepend debug output with timestamp (default: %d)"), DEFAULT_LOGTIMESTAMPS)); if (showDebug) { strUsage += HelpMessageOpt( "-logtimemicros", strprintf( "Add microsecond precision to debug timestamps (default: %d)", DEFAULT_LOGTIMEMICROS)); strUsage += HelpMessageOpt( "-mocktime=", "Replace actual time with seconds since epoch (default: 0)"); strUsage += HelpMessageOpt( "-limitfreerelay=", strprintf("Continuously rate-limit free transactions to *1000 " "bytes per minute (default: %u)", DEFAULT_LIMITFREERELAY)); strUsage += HelpMessageOpt("-relaypriority", strprintf("Require high priority for relaying free " "or low-fee transactions (default: %d)", DEFAULT_RELAYPRIORITY)); strUsage += HelpMessageOpt( "-maxsigcachesize=", strprintf("Limit size of signature cache to MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE)); strUsage += HelpMessageOpt( "-maxscriptcachesize=", strprintf("Limit size of script cache to MiB (default: %u)", DEFAULT_MAX_SCRIPT_CACHE_SIZE)); strUsage += HelpMessageOpt( "-maxtipage=", strprintf("Maximum tip age in seconds to consider node in initial " "block download (default: %u)", DEFAULT_MAX_TIP_AGE)); } strUsage += HelpMessageOpt( "-minrelaytxfee=", strprintf( _("Fees (in %s/kB) smaller than this are considered zero fee for " "relaying, mining and transaction creation (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_MIN_RELAY_TX_FEE))); strUsage += HelpMessageOpt( "-maxtxfee=", strprintf(_("Maximum total fees (in %s) to use in a single wallet " "transaction or raw transaction; setting this too low may " "abort large transactions (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_TRANSACTION_MAXFEE))); strUsage += HelpMessageOpt( "-printtoconsole", _("Send trace/debug info to console instead of debug.log file")); if (showDebug) { strUsage += HelpMessageOpt( "-printpriority", strprintf("Log transaction priority and fee per " "kB when mining blocks (default: %d)", DEFAULT_PRINTPRIORITY)); } strUsage += HelpMessageOpt("-shrinkdebugfile", _("Shrink debug.log file on client startup " "(default: 1 when no -debug)")); AppendParamsHelpMessages(strUsage, showDebug); strUsage += HelpMessageGroup(_("Node relay options:")); if (showDebug) { strUsage += HelpMessageOpt( "-acceptnonstdtxn", strprintf( "Relay and mine \"non-standard\" transactions (%sdefault: %d)", "testnet/regtest only; ", !Params(CBaseChainParams::TESTNET).RequireStandard())); strUsage += HelpMessageOpt("-excessiveblocksize=", strprintf(_("Do not accept blocks larger than this " "limit, in bytes (default: %d)"), LEGACY_MAX_BLOCK_SIZE)); strUsage += HelpMessageOpt( "-incrementalrelayfee=", strprintf( "Fee rate (in %s/kB) used to define cost of relay, used for " "mempool limiting and BIP 125 replacement. (default: %s)", CURRENCY_UNIT, FormatMoney(DEFAULT_INCREMENTAL_RELAY_FEE))); strUsage += HelpMessageOpt( "-dustrelayfee=", strprintf("Fee rate (in %s/kB) used to defined dust, the value of " "an output such that it will cost about 1/3 of its value " "in fees at this fee rate to spend it. 
(default: %s)", CURRENCY_UNIT, FormatMoney(DUST_RELAY_TX_FEE))); } strUsage += HelpMessageOpt("-bytespersigop", strprintf(_("Equivalent bytes per sigop in transactions " "for relay and mining (default: %u)"), DEFAULT_BYTES_PER_SIGOP)); strUsage += HelpMessageOpt( "-datacarrier", strprintf(_("Relay and mine data carrier transactions (default: %d)"), DEFAULT_ACCEPT_DATACARRIER)); strUsage += HelpMessageOpt( "-datacarriersize", strprintf(_("Maximum size of data in data carrier transactions we " "relay and mine (default: %u)"), MAX_OP_RETURN_RELAY)); strUsage += HelpMessageGroup(_("Block creation options:")); strUsage += HelpMessageOpt( "-blockmaxsize=", strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_MAX_GENERATED_BLOCK_SIZE)); strUsage += HelpMessageOpt( "-blockprioritypercentage=", strprintf(_("Set maximum percentage of a block reserved to " "high-priority/low-fee transactions (default: %d)"), DEFAULT_BLOCK_PRIORITY_PERCENTAGE)); strUsage += HelpMessageOpt( "-blockmintxfee=", strprintf(_("Set lowest fee rate (in %s/kB) for transactions to be " "included in block creation. (default: %s)"), CURRENCY_UNIT, FormatMoney(DEFAULT_BLOCK_MIN_TX_FEE))); if (showDebug) strUsage += HelpMessageOpt("-blockversion=", "Override block version to test forking scenarios"); strUsage += HelpMessageGroup(_("RPC server options:")); strUsage += HelpMessageOpt("-server", _("Accept command line and JSON-RPC commands")); strUsage += HelpMessageOpt( "-rest", strprintf(_("Accept public REST requests (default: %d)"), DEFAULT_REST_ENABLE)); strUsage += HelpMessageOpt( "-rpcbind=", _("Bind to given address to listen for JSON-RPC connections. Use " "[host]:port notation for IPv6. This option can be specified " "multiple times (default: bind to all interfaces)")); strUsage += HelpMessageOpt("-rpccookiefile=", _("Location of the auth cookie (default: data dir)")); strUsage += HelpMessageOpt("-rpcuser=", _("Username for JSON-RPC connections")); strUsage += HelpMessageOpt("-rpcpassword=", _("Password for JSON-RPC connections")); strUsage += HelpMessageOpt( "-rpcauth=", _("Username and hashed password for JSON-RPC connections. The field " " comes in the format: :$. A canonical " "python script is included in share/rpcuser. The client then " "connects normally using the " "rpcuser=/rpcpassword= pair of arguments. This " "option can be specified multiple times")); strUsage += HelpMessageOpt( "-rpcport=", strprintf(_("Listen for JSON-RPC connections on (default: %u or " "testnet: %u)"), BaseParams(CBaseChainParams::MAIN).RPCPort(), BaseParams(CBaseChainParams::TESTNET).RPCPort())); strUsage += HelpMessageOpt( "-rpcallowip=", _("Allow JSON-RPC connections from specified source. Valid for " "are a single IP (e.g. 1.2.3.4), a network/netmask (e.g. " "1.2.3.4/255.255.255.0) or a network/CIDR (e.g. 1.2.3.4/24). 
This " "option can be specified multiple times")); strUsage += HelpMessageOpt( "-rpcthreads=", strprintf( _("Set the number of threads to service RPC calls (default: %d)"), DEFAULT_HTTP_THREADS)); if (showDebug) { strUsage += HelpMessageOpt( "-rpcworkqueue=", strprintf("Set the depth of the work queue to " "service RPC calls (default: %d)", DEFAULT_HTTP_WORKQUEUE)); strUsage += HelpMessageOpt( "-rpcservertimeout=", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT)); } return strUsage; } std::string LicenseInfo() { const std::string URL_SOURCE_CODE = ""; const std::string URL_WEBSITE = ""; return CopyrightHolders( strprintf(_("Copyright (C) %i-%i"), 2009, COPYRIGHT_YEAR) + " ") + "\n" + "\n" + strprintf(_("Please contribute if you find %s useful. " "Visit %s for further information about the software."), PACKAGE_NAME, URL_WEBSITE) + "\n" + strprintf(_("The source code is available from %s."), URL_SOURCE_CODE) + "\n" + "\n" + _("This is experimental software.") + "\n" + strprintf(_("Distributed under the MIT software license, see the " "accompanying file %s or %s"), "COPYING", "") + "\n" + "\n" + strprintf(_("This product includes software developed by the " "OpenSSL Project for use in the OpenSSL Toolkit %s and " "cryptographic software written by Eric Young and UPnP " "software written by Thomas Bernard."), "") + "\n"; } static void BlockNotifyCallback(bool initialSync, const CBlockIndex *pBlockIndex) { if (initialSync || !pBlockIndex) return; std::string strCmd = gArgs.GetArg("-blocknotify", ""); boost::replace_all(strCmd, "%s", pBlockIndex->GetBlockHash().GetHex()); boost::thread t(runCommand, strCmd); // thread runs free } static bool fHaveGenesis = false; static boost::mutex cs_GenesisWait; static CConditionVariable condvar_GenesisWait; static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex) { if (pBlockIndex != nullptr) { { boost::unique_lock lock_GenesisWait(cs_GenesisWait); fHaveGenesis = true; } condvar_GenesisWait.notify_all(); } } struct CImportingNow { CImportingNow() { assert(fImporting == false); fImporting = true; } ~CImportingNow() { assert(fImporting == true); fImporting = false; } }; // If we're using -prune with -reindex, then delete block files that will be // ignored by the reindex. Since reindexing works by starting at block file 0 // and looping until a blockfile is missing, do the same here to delete any // later block files after a gap. Also delete all rev files since they'll be // rewritten by the reindex anyway. This ensures that vinfoBlockFile is in sync // with what's actually on disk by the time we start downloading, so that // pruning works correctly. void CleanupBlockRevFiles() { std::map mapBlockFiles; // Glob all blk?????.dat and rev?????.dat files from the blocks directory. // Remove the rev files immediately and insert the blk file paths into an // ordered map keyed by block file index. 
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for " "-reindex with -prune\n"); fs::path blocksdir = GetDataDir() / "blocks"; for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) { if (is_regular_file(*it) && it->path().filename().string().length() == 12 && it->path().filename().string().substr(8, 4) == ".dat") { if (it->path().filename().string().substr(0, 3) == "blk") mapBlockFiles[it->path().filename().string().substr(3, 5)] = it->path(); else if (it->path().filename().string().substr(0, 3) == "rev") remove(it->path()); } } // Remove all block files that aren't part of a contiguous set starting at // zero by walking the ordered map (keys are block file indices) by keeping // a separate counter. Once we hit a gap (or if 0 doesn't exist) start // removing block files. int nContigCounter = 0; for (const std::pair &item : mapBlockFiles) { if (atoi(item.first) == nContigCounter) { nContigCounter++; continue; } remove(item.second); } } void ThreadImport(const Config &config, std::vector vImportFiles) { RenameThread("bitcoin-loadblk"); { CImportingNow imp; // -reindex if (fReindex) { int nFile = 0; while (true) { CDiskBlockPos pos(nFile, 0); if (!fs::exists(GetBlockPosFilename(pos, "blk"))) { // No block files left to reindex break; } FILE *file = OpenBlockFile(pos, true); if (!file) { // This error is logged in OpenBlockFile break; } LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); LoadExternalBlockFile(config, file, &pos); nFile++; } pblocktree->WriteReindexing(false); fReindex = false; LogPrintf("Reindexing finished\n"); // To avoid ending up in a situation without genesis block, re-try // initializing (no-op if reindexing worked): InitBlockIndex(config); } // hardcoded $DATADIR/bootstrap.dat fs::path pathBootstrap = GetDataDir() / "bootstrap.dat"; if (fs::exists(pathBootstrap)) { FILE *file = fsbridge::fopen(pathBootstrap, "rb"); if (file) { fs::path pathBootstrapOld = GetDataDir() / "bootstrap.dat.old"; LogPrintf("Importing bootstrap.dat...\n"); LoadExternalBlockFile(config, file); RenameOver(pathBootstrap, pathBootstrapOld); } else { LogPrintf("Warning: Could not open bootstrap file %s\n", pathBootstrap.string()); } } // -loadblock= for (const fs::path &path : vImportFiles) { FILE *file = fsbridge::fopen(path, "rb"); if (file) { LogPrintf("Importing blocks file %s...\n", path.string()); LoadExternalBlockFile(config, file); } else { LogPrintf("Warning: Could not open blocks file %s\n", path.string()); } } // scan for better chains in the block chain database, that are not yet // connected in the active best chain CValidationState state; if (!ActivateBestChain(config, state)) { LogPrintf("Failed to connect best block"); StartShutdown(); } if (gArgs.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) { LogPrintf("Stopping after block import\n"); StartShutdown(); } } // End scope of CImportingNow if (gArgs.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { LoadMempool(config); fDumpMempoolLater = !fRequestShutdown; } } /** Sanity checks * Ensure that Bitcoin is running in a usable environment with all * necessary library support. */ bool InitSanityCheck(void) { if (!ECC_InitSanityCheck()) { InitError( "Elliptic curve cryptography sanity check failure. Aborting."); return false; } if (!glibc_sanity_test() || !glibcxx_sanity_test()) { return false; } if (!Random_SanityCheck()) { InitError("OS cryptographic RNG sanity check failure. 
Aborting."); return false; } return true; } static bool AppInitServers(Config &config, boost::thread_group &threadGroup) { RPCServer::OnStarted(&OnRPCStarted); RPCServer::OnStopped(&OnRPCStopped); RPCServer::OnPreCommand(&OnRPCPreCommand); if (!InitHTTPServer(config)) return false; if (!StartRPC()) return false; if (!StartHTTPRPC()) return false; if (gArgs.GetBoolArg("-rest", DEFAULT_REST_ENABLE) && !StartREST()) return false; if (!StartHTTPServer()) return false; return true; } // Parameter interaction based on rules void InitParameterInteraction() { // when specifying an explicit binding address, you want to listen on it // even when -connect or -proxy is specified. if (gArgs.IsArgSet("-bind")) { if (gArgs.SoftSetBoolArg("-listen", true)) LogPrintf( "%s: parameter interaction: -bind set -> setting -listen=1\n", __func__); } if (gArgs.IsArgSet("-whitebind")) { if (gArgs.SoftSetBoolArg("-listen", true)) LogPrintf("%s: parameter interaction: -whitebind set -> setting " "-listen=1\n", __func__); } if (gArgs.IsArgSet("-connect")) { // when only connecting to trusted nodes, do not seed via DNS, or listen // by default. if (gArgs.SoftSetBoolArg("-dnsseed", false)) LogPrintf("%s: parameter interaction: -connect set -> setting " "-dnsseed=0\n", __func__); if (gArgs.SoftSetBoolArg("-listen", false)) LogPrintf("%s: parameter interaction: -connect set -> setting " "-listen=0\n", __func__); } if (gArgs.IsArgSet("-proxy")) { // to protect privacy, do not listen by default if a default proxy // server is specified. if (gArgs.SoftSetBoolArg("-listen", false)) LogPrintf( "%s: parameter interaction: -proxy set -> setting -listen=0\n", __func__); // to protect privacy, do not use UPNP when a proxy is set. The user may // still specify -listen=1 to listen locally, so don't rely on this // happening through -listen below. if (gArgs.SoftSetBoolArg("-upnp", false)) LogPrintf( "%s: parameter interaction: -proxy set -> setting -upnp=0\n", __func__); // to protect privacy, do not discover addresses by default if (gArgs.SoftSetBoolArg("-discover", false)) LogPrintf("%s: parameter interaction: -proxy set -> setting " "-discover=0\n", __func__); } if (!gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) { // do not map ports or try to retrieve public IP when not listening // (pointless) if (gArgs.SoftSetBoolArg("-upnp", false)) LogPrintf( "%s: parameter interaction: -listen=0 -> setting -upnp=0\n", __func__); if (gArgs.SoftSetBoolArg("-discover", false)) LogPrintf( "%s: parameter interaction: -listen=0 -> setting -discover=0\n", __func__); if (gArgs.SoftSetBoolArg("-listenonion", false)) LogPrintf("%s: parameter interaction: -listen=0 -> setting " "-listenonion=0\n", __func__); } if (gArgs.IsArgSet("-externalip")) { // if an explicit public IP is specified, do not try to find others if (gArgs.SoftSetBoolArg("-discover", false)) LogPrintf("%s: parameter interaction: -externalip set -> setting " "-discover=0\n", __func__); } // disable whitelistrelay in blocksonly mode if (gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) { if (gArgs.SoftSetBoolArg("-whitelistrelay", false)) LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting " "-whitelistrelay=0\n", __func__); } // Forcing relay from whitelisted hosts implies we will accept relays from // them in the first place. 
if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { if (gArgs.SoftSetBoolArg("-whitelistrelay", true)) LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> " "setting -whitelistrelay=1\n", __func__); } } static std::string ResolveErrMsg(const char *const optname, const std::string &strBind) { return strprintf(_("Cannot resolve -%s address: '%s'"), optname, strBind); } void InitLogging() { fPrintToConsole = gArgs.GetBoolArg("-printtoconsole", false); fLogTimestamps = gArgs.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS); fLogTimeMicros = gArgs.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS); fLogIPs = gArgs.GetBoolArg("-logips", DEFAULT_LOGIPS); LogPrintf("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"); LogPrintf("%s version %s\n", CLIENT_NAME, FormatFullVersion()); } namespace { // Variables internal to initialization process only ServiceFlags nRelevantServices = NODE_NETWORK; int nMaxConnections; int nUserMaxConnections; int nFD; ServiceFlags nLocalServices = NODE_NETWORK; } // namespace [[noreturn]] static void new_handler_terminate() { // Rather than throwing std::bad-alloc if allocation fails, terminate // immediately to (try to) avoid chain corruption. Since LogPrintf may // itself allocate memory, set the handler directly to terminate first. std::set_new_handler(std::terminate); LogPrintf("Error: Out of memory. Terminating.\n"); // The log was successful, terminate now. std::terminate(); }; bool AppInitBasicSetup() { // Step 1: setup #ifdef _MSC_VER // Turn off Microsoft heap dump noise _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_WARN, CreateFileA("NUL", GENERIC_WRITE, 0, nullptr, OPEN_EXISTING, 0, 0)); #endif #if _MSC_VER >= 1400 // Disable confusing "helpful" text message on abort, Ctrl-C _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT); #endif #ifdef WIN32 // Enable Data Execution Prevention (DEP) // Minimum supported OS versions: WinXP SP3, WinVista >= SP1, Win Server 2008 // A failure is non-critical and needs no further attention! #ifndef PROCESS_DEP_ENABLE // We define this here, because GCCs winbase.h limits this to _WIN32_WINNT >= // 0x0601 (Windows 7), which is not correct. Can be removed, when GCCs winbase.h // is fixed! 
#define PROCESS_DEP_ENABLE 0x00000001 #endif typedef BOOL(WINAPI * PSETPROCDEPPOL)(DWORD); PSETPROCDEPPOL setProcDEPPol = (PSETPROCDEPPOL)GetProcAddress( GetModuleHandleA("Kernel32.dll"), "SetProcessDEPPolicy"); if (setProcDEPPol != nullptr) setProcDEPPol(PROCESS_DEP_ENABLE); #endif if (!SetupNetworking()) return InitError("Initializing networking failed"); #ifndef WIN32 if (!gArgs.GetBoolArg("-sysperms", false)) { umask(077); } // Clean shutdown on SIGTERM struct sigaction sa; sa.sa_handler = HandleSIGTERM; sigemptyset(&sa.sa_mask); sa.sa_flags = 0; sigaction(SIGTERM, &sa, nullptr); sigaction(SIGINT, &sa, nullptr); // Reopen debug.log on SIGHUP struct sigaction sa_hup; sa_hup.sa_handler = HandleSIGHUP; sigemptyset(&sa_hup.sa_mask); sa_hup.sa_flags = 0; sigaction(SIGHUP, &sa_hup, nullptr); // Ignore SIGPIPE, otherwise it will bring the daemon down if the client // closes unexpectedly signal(SIGPIPE, SIG_IGN); #endif std::set_new_handler(new_handler_terminate); return true; } bool AppInitParameterInteraction(Config &config) { - const CChainParams &chainparams = Params(); + const CChainParams &chainparams = config.GetChainParams(); // Step 2: parameter interactions // also see: InitParameterInteraction() // if using block pruning, then disallow txindex if (gArgs.GetArg("-prune", 0)) { if (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) return InitError(_("Prune mode is incompatible with -txindex.")); } // if space reserved for high priority transactions is misconfigured // stop program execution and warn the user with a proper error message const int64_t blkprio = gArgs.GetArg("-blockprioritypercentage", DEFAULT_BLOCK_PRIORITY_PERCENTAGE); if (!config.SetBlockPriorityPercentage(blkprio)) { return InitError(_("Block priority percentage has to belong to the " "[0..100] interval.")); } // Make sure enough file descriptors are available int nBind = std::max( (gArgs.IsArgSet("-bind") ? gArgs.GetArgs("-bind").size() : 0) + (gArgs.IsArgSet("-whitebind") ? 
gArgs.GetArgs("-whitebind").size() : 0), size_t(1)); nUserMaxConnections = gArgs.GetArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS); nMaxConnections = std::max(nUserMaxConnections, 0); // Trim requested connection counts, to fit into system limitations nMaxConnections = std::max(std::min(nMaxConnections, (int)(FD_SETSIZE - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS)), 0); nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS); if (nFD < MIN_CORE_FILEDESCRIPTORS) return InitError(_("Not enough file descriptors available.")); nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS, nMaxConnections); if (nMaxConnections < nUserMaxConnections) { InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, " "because of system limitations."), nUserMaxConnections, nMaxConnections)); } // Step 3: parameter-to-internal-flags if (gArgs.IsArgSet("-debug")) { // Special-case: if -debug=0/-nodebug is set, turn off debugging // messages const std::vector &categories = gArgs.GetArgs("-debug"); if (find(categories.begin(), categories.end(), std::string("0")) == categories.end()) { for (const auto &cat : categories) { uint32_t flag = 0; if (!GetLogCategory(&flag, &cat)) { InitWarning( strprintf(_("Unsupported logging category %s=%s."), "-debug", cat)); } logCategories |= flag; } } } // Now remove the logging categories which were explicitly excluded if (gArgs.IsArgSet("-debugexclude")) { for (const std::string &cat : gArgs.GetArgs("-debugexclude")) { uint32_t flag; if (!GetLogCategory(&flag, &cat)) { InitWarning(strprintf(_("Unsupported logging category %s=%s."), "-debugexclude", cat)); } logCategories &= ~flag; } } // Check for -debugnet if (gArgs.GetBoolArg("-debugnet", false)) InitWarning( _("Unsupported argument -debugnet ignored, use -debug=net.")); // Check for -socks - as this is a privacy risk to continue, exit here if (gArgs.IsArgSet("-socks")) return InitError( _("Unsupported argument -socks found. Setting SOCKS version isn't " "possible anymore, only SOCKS5 proxies are supported.")); // Check for -tor - as this is a privacy risk to continue, exit here if (gArgs.GetBoolArg("-tor", false)) return InitError(_("Unsupported argument -tor found, use -onion.")); if (gArgs.GetBoolArg("-benchmark", false)) InitWarning( _("Unsupported argument -benchmark ignored, use -debug=bench.")); if (gArgs.GetBoolArg("-whitelistalwaysrelay", false)) InitWarning(_("Unsupported argument -whitelistalwaysrelay ignored, use " "-whitelistrelay and/or -whitelistforcerelay.")); if (gArgs.IsArgSet("-blockminsize")) InitWarning("Unsupported argument -blockminsize ignored."); // Checkmempool and checkblockindex default to true in regtest mode int ratio = std::min( std::max( gArgs.GetArg("-checkmempool", chainparams.DefaultConsistencyChecks() ? 
1 : 0), 0), 1000000); if (ratio != 0) { mempool.setSanityCheck(1.0 / ratio); } fCheckBlockIndex = gArgs.GetBoolArg("-checkblockindex", chainparams.DefaultConsistencyChecks()); fCheckpointsEnabled = gArgs.GetBoolArg("-checkpoints", DEFAULT_CHECKPOINTS_ENABLED); hashAssumeValid = uint256S( gArgs.GetArg("-assumevalid", chainparams.GetConsensus().defaultAssumeValid.GetHex())); if (!hashAssumeValid.IsNull()) LogPrintf("Assuming ancestors of block %s have valid signatures.\n", hashAssumeValid.GetHex()); else LogPrintf("Validating signatures for all blocks.\n"); // mempool limits int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; int64_t nMempoolSizeMin = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT) * 1000 * 40; if (nMempoolSizeMax < 0 || nMempoolSizeMax < nMempoolSizeMin) return InitError(strprintf(_("-maxmempool must be at least %d MB"), std::ceil(nMempoolSizeMin / 1000000.0))); // Incremental relay fee sets the minimimum feerate increase necessary for // BIP 125 replacement in the mempool and the amount the mempool min fee // increases above the feerate of txs evicted due to mempool limiting. if (gArgs.IsArgSet("-incrementalrelayfee")) { Amount n(0); if (!ParseMoney(gArgs.GetArg("-incrementalrelayfee", ""), n)) return InitError( AmountErrMsg("incrementalrelayfee", gArgs.GetArg("-incrementalrelayfee", ""))); incrementalRelayFee = CFeeRate(n); } // -par=0 means autodetect, but nScriptCheckThreads==0 means no concurrency nScriptCheckThreads = gArgs.GetArg("-par", DEFAULT_SCRIPTCHECK_THREADS); if (nScriptCheckThreads <= 0) nScriptCheckThreads += GetNumCores(); if (nScriptCheckThreads <= 1) nScriptCheckThreads = 0; else if (nScriptCheckThreads > MAX_SCRIPTCHECK_THREADS) nScriptCheckThreads = MAX_SCRIPTCHECK_THREADS; // Configure excessive block size. const uint64_t nProposedExcessiveBlockSize = gArgs.GetArg("-excessiveblocksize", DEFAULT_MAX_BLOCK_SIZE); if (!config.SetMaxBlockSize(nProposedExcessiveBlockSize)) { return InitError( _("Excessive block size must be > 1,000,000 bytes (1MB)")); } // Check blockmaxsize does not exceed maximum accepted block size. const uint64_t nProposedMaxGeneratedBlockSize = gArgs.GetArg("-blockmaxsize", DEFAULT_MAX_GENERATED_BLOCK_SIZE); if (nProposedMaxGeneratedBlockSize > config.GetMaxBlockSize()) { auto msg = _("Max generated block size (blockmaxsize) cannot exceed " "the excessive block size (excessiveblocksize)"); return InitError(msg); } // block pruning; get the amount of disk space (in MiB) to allot for block & // undo files int64_t nPruneArg = gArgs.GetArg("-prune", 0); if (nPruneArg < 0) { return InitError( _("Prune cannot be configured with a negative value.")); } nPruneTarget = (uint64_t)nPruneArg * 1024 * 1024; if (nPruneArg == 1) { // manual pruning: -prune=1 LogPrintf("Block pruning enabled. Use RPC call " "pruneblockchain(height) to manually prune block and undo " "files.\n"); nPruneTarget = std::numeric_limits::max(); fPruneMode = true; } else if (nPruneTarget) { if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) { return InitError( strprintf(_("Prune configured below the minimum of %d MiB. 
" "Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)); } LogPrintf("Prune configured to target %uMiB on disk for block and undo " "files.\n", nPruneTarget / 1024 / 1024); fPruneMode = true; } RegisterAllRPCCommands(tableRPC); #ifdef ENABLE_WALLET RegisterWalletRPCCommands(tableRPC); RegisterDumpRPCCommands(tableRPC); #endif nConnectTimeout = gArgs.GetArg("-timeout", DEFAULT_CONNECT_TIMEOUT); if (nConnectTimeout <= 0) nConnectTimeout = DEFAULT_CONNECT_TIMEOUT; // Fee-per-kilobyte amount considered the same as "free". If you are mining, // be careful setting this: if you set it to zero then a transaction spammer // can cheaply fill blocks using 1-satoshi-fee transactions. It should be // set above the real cost to you of processing a transaction. if (gArgs.IsArgSet("-minrelaytxfee")) { Amount n(0); auto parsed = ParseMoney(gArgs.GetArg("-minrelaytxfee", ""), n); if (!parsed || Amount(0) == n) return InitError(AmountErrMsg("minrelaytxfee", gArgs.GetArg("-minrelaytxfee", ""))); // High fee check is done afterward in CWallet::ParameterInteraction() ::minRelayTxFee = CFeeRate(n); } else if (incrementalRelayFee > ::minRelayTxFee) { // Allow only setting incrementalRelayFee to control both ::minRelayTxFee = incrementalRelayFee; LogPrintf( "Increasing minrelaytxfee to %s to match incrementalrelayfee\n", ::minRelayTxFee.ToString()); } // Sanity check argument for min fee for including tx in block // TODO: Harmonize which arguments need sanity checking and where that // happens. if (gArgs.IsArgSet("-blockmintxfee")) { Amount n(0); if (!ParseMoney(gArgs.GetArg("-blockmintxfee", ""), n)) return InitError(AmountErrMsg("blockmintxfee", gArgs.GetArg("-blockmintxfee", ""))); } // Feerate used to define dust. Shouldn't be changed lightly as old // implementations may inadvertently create non-standard transactions. if (gArgs.IsArgSet("-dustrelayfee")) { Amount n(0); auto parsed = ParseMoney(gArgs.GetArg("-dustrelayfee", ""), n); if (!parsed || Amount(0) == n) return InitError(AmountErrMsg("dustrelayfee", gArgs.GetArg("-dustrelayfee", ""))); dustRelayFee = CFeeRate(n); } fRequireStandard = !gArgs.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard()); if (chainparams.RequireStandard() && !fRequireStandard) return InitError( strprintf("acceptnonstdtxn is not currently supported for %s chain", chainparams.NetworkIDString())); nBytesPerSigOp = gArgs.GetArg("-bytespersigop", nBytesPerSigOp); #ifdef ENABLE_WALLET if (!CWallet::ParameterInteraction()) return false; #endif fIsBareMultisigStd = gArgs.GetBoolArg("-permitbaremultisig", DEFAULT_PERMIT_BAREMULTISIG); fAcceptDatacarrier = gArgs.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER); nMaxDatacarrierBytes = gArgs.GetArg("-datacarriersize", nMaxDatacarrierBytes); // Option to startup with mocktime set (used for regression testing): SetMockTime(gArgs.GetArg("-mocktime", 0)); // SetMockTime(0) is a no-op if (gArgs.GetBoolArg("-peerbloomfilters", DEFAULT_PEERBLOOMFILTERS)) nLocalServices = ServiceFlags(nLocalServices | NODE_BLOOM); // Signal Bitcoin Cash support. // TODO: remove some time after the hardfork when no longer needed // to differentiate the network nodes. 
nLocalServices = ServiceFlags(nLocalServices | NODE_BITCOIN_CASH); nMaxTipAge = gArgs.GetArg("-maxtipage", DEFAULT_MAX_TIP_AGE); if (gArgs.IsArgSet("-bip9params")) { // Allow overriding BIP9 parameters for testing if (!chainparams.MineBlocksOnDemand()) { return InitError( "BIP9 parameters may only be overridden on regtest."); } for (const std::string &strDeployment : gArgs.GetArgs("-bip9params")) { std::vector vDeploymentParams; boost::split(vDeploymentParams, strDeployment, boost::is_any_of(":")); if (vDeploymentParams.size() != 3) { return InitError("BIP9 parameters malformed, expecting " "deployment:start:end"); } int64_t nStartTime, nTimeout; if (!ParseInt64(vDeploymentParams[1], &nStartTime)) { return InitError( strprintf("Invalid nStartTime (%s)", vDeploymentParams[1])); } if (!ParseInt64(vDeploymentParams[2], &nTimeout)) { return InitError( strprintf("Invalid nTimeout (%s)", vDeploymentParams[2])); } bool found = false; for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { if (vDeploymentParams[0].compare( VersionBitsDeploymentInfo[j].name) == 0) { UpdateRegtestBIP9Parameters(Consensus::DeploymentPos(j), nStartTime, nTimeout); found = true; LogPrintf("Setting BIP9 activation parameters for %s to " "start=%ld, timeout=%ld\n", vDeploymentParams[0], nStartTime, nTimeout); break; } } if (!found) { return InitError( strprintf("Invalid deployment (%s)", vDeploymentParams[0])); } } } return true; } static bool LockDataDirectory(bool probeOnly) { std::string strDataDir = GetDataDir().string(); // Make sure only a single Bitcoin process is using the data directory. fs::path pathLockFile = GetDataDir() / ".lock"; // empty lock file; created if it doesn't exist. FILE *file = fsbridge::fopen(pathLockFile, "a"); if (file) fclose(file); try { static boost::interprocess::file_lock lock( pathLockFile.string().c_str()); if (!lock.try_lock()) { return InitError( strprintf(_("Cannot obtain a lock on data directory %s. %s is " "probably already running."), strDataDir, _(PACKAGE_NAME))); } if (probeOnly) { lock.unlock(); } } catch (const boost::interprocess::interprocess_exception &e) { return InitError(strprintf(_("Cannot obtain a lock on data directory " "%s. %s is probably already running.") + " %s.", strDataDir, _(PACKAGE_NAME), e.what())); } return true; } bool AppInitSanityChecks() { // Step 4: sanity checks // Initialize elliptic curve code std::string sha256_algo = SHA256AutoDetect(); LogPrintf("Using the '%s' SHA256 implementation\n", sha256_algo); RandomInit(); ECC_Start(); globalVerifyHandle.reset(new ECCVerifyHandle()); // Sanity check if (!InitSanityCheck()) { return InitError(strprintf( _("Initialization sanity check failed. %s is shutting down."), _(PACKAGE_NAME))); } // Probe the data directory lock to give an early error message, if possible return LockDataDirectory(true); } bool AppInitMain(Config &config, boost::thread_group &threadGroup, CScheduler &scheduler) { const CChainParams &chainparams = Params(); // Step 4a: application initialization // After daemonization get the data directory lock again and hold on to it // until exit. This creates a slight window for a race condition to happen, // however this condition is harmless: it will at most make us exit without // printing a message to console. 
if (!LockDataDirectory(false)) { // Detailed error printed inside LockDataDirectory return false; } #ifndef WIN32 CreatePidFile(GetPidFile(), getpid()); #endif if (gArgs.GetBoolArg("-shrinkdebugfile", logCategories != BCLog::NONE)) { // Do this first since it both loads a bunch of debug.log into memory, // and because this needs to happen before any other debug.log printing. ShrinkDebugFile(); } if (fPrintToDebugLog) { OpenDebugLog(); } if (!fLogTimestamps) { LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime())); } LogPrintf("Default data directory %s\n", GetDefaultDataDir().string()); LogPrintf("Using data directory %s\n", GetDataDir().string()); LogPrintf( "Using config file %s\n", GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)).string()); LogPrintf("Using at most %i automatic connections (%i file descriptors " "available)\n", nMaxConnections, nFD); InitSignatureCache(); InitScriptExecutionCache(); LogPrintf("Using %u threads for script verification\n", nScriptCheckThreads); if (nScriptCheckThreads) { for (int i = 0; i < nScriptCheckThreads - 1; i++) { threadGroup.create_thread(&ThreadScriptCheck); } } // Start the lightweight task scheduler thread CScheduler::Function serviceLoop = boost::bind(&CScheduler::serviceQueue, &scheduler); threadGroup.create_thread(boost::bind(&TraceThread, "scheduler", serviceLoop)); /* Start the RPC server already. It will be started in "warmup" mode * and not really process calls already (but it will signify connections * that the server is there and will be ready later). Warmup mode will * be disabled when initialisation is finished. */ if (gArgs.GetBoolArg("-server", false)) { uiInterface.InitMessage.connect(SetRPCWarmupStatus); if (!AppInitServers(config, threadGroup)) { return InitError( _("Unable to start HTTP server. See debug log for details.")); } } int64_t nStart; // Step 5: verify wallet database integrity #ifdef ENABLE_WALLET if (!CWallet::Verify()) { return false; } #endif // Step 6: network initialization // Note that we absolutely cannot open any actual connections // until the very end ("start node") as the UTXO/block state // is not yet setup and may end up being set up twice if we // need to reindex later. 
assert(!g_connman); g_connman = std::unique_ptr<CConnman>( new CConnman(config, GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()))); CConnman &connman = *g_connman; peerLogic.reset(new PeerLogicValidation(&connman)); RegisterValidationInterface(peerLogic.get()); RegisterNodeSignals(GetNodeSignals()); if (gArgs.IsArgSet("-onlynet")) { std::set<enum Network> nets; for (const std::string &snet : gArgs.GetArgs("-onlynet")) { enum Network net = ParseNetwork(snet); if (net == NET_UNROUTABLE) return InitError(strprintf( _("Unknown network specified in -onlynet: '%s'"), snet)); nets.insert(net); } for (int n = 0; n < NET_MAX; n++) { enum Network net = (enum Network)n; if (!nets.count(net)) SetLimited(net); } } if (gArgs.IsArgSet("-whitelist")) { for (const std::string &net : gArgs.GetArgs("-whitelist")) { CSubNet subnet; LookupSubNet(net.c_str(), subnet); if (!subnet.IsValid()) return InitError(strprintf( _("Invalid netmask specified in -whitelist: '%s'"), net)); connman.AddWhitelistedRange(subnet); } } bool proxyRandomize = gArgs.GetBoolArg("-proxyrandomize", DEFAULT_PROXYRANDOMIZE); // -proxy sets a proxy for all outgoing network traffic // -noproxy (or -proxy=0) as well as the empty string can be used to not set // a proxy, this is the default std::string proxyArg = gArgs.GetArg("-proxy", ""); SetLimited(NET_TOR); if (proxyArg != "" && proxyArg != "0") { CService resolved(LookupNumeric(proxyArg.c_str(), 9050)); proxyType addrProxy = proxyType(resolved, proxyRandomize); if (!addrProxy.IsValid()) { return InitError( strprintf(_("Invalid -proxy address: '%s'"), proxyArg)); } SetProxy(NET_IPV4, addrProxy); SetProxy(NET_IPV6, addrProxy); SetProxy(NET_TOR, addrProxy); SetNameProxy(addrProxy); SetLimited(NET_TOR, false); // by default, -proxy sets onion as // reachable, unless -noonion later } // -onion can be used to set only a proxy for .onion, or override normal // proxy for .onion addresses. // -noonion (or -onion=0) disables connecting to .onion entirely.
An empty // string is used to not override the onion proxy (in which case it defaults // to -proxy set above, or none) std::string onionArg = gArgs.GetArg("-onion", ""); if (onionArg != "") { if (onionArg == "0") { // Handle -noonion/-onion=0 SetLimited(NET_TOR); // set onions as unreachable } else { CService resolved(LookupNumeric(onionArg.c_str(), 9050)); proxyType addrOnion = proxyType(resolved, proxyRandomize); if (!addrOnion.IsValid()) { return InitError( strprintf(_("Invalid -onion address: '%s'"), onionArg)); } SetProxy(NET_TOR, addrOnion); SetLimited(NET_TOR, false); } } // see Step 2: parameter interactions for more information about these fListen = gArgs.GetBoolArg("-listen", DEFAULT_LISTEN); fDiscover = gArgs.GetBoolArg("-discover", true); fNameLookup = gArgs.GetBoolArg("-dns", DEFAULT_NAME_LOOKUP); fRelayTxes = !gArgs.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY); if (fListen) { bool fBound = false; if (gArgs.IsArgSet("-bind")) { for (const std::string &strBind : gArgs.GetArgs("-bind")) { CService addrBind; if (!Lookup(strBind.c_str(), addrBind, GetListenPort(), false)) { return InitError(ResolveErrMsg("bind", strBind)); } fBound |= Bind(connman, addrBind, (BF_EXPLICIT | BF_REPORT_ERROR)); } } if (gArgs.IsArgSet("-whitebind")) { for (const std::string &strBind : gArgs.GetArgs("-whitebind")) { CService addrBind; if (!Lookup(strBind.c_str(), addrBind, 0, false)) { return InitError(ResolveErrMsg("whitebind", strBind)); } if (addrBind.GetPort() == 0) { return InitError(strprintf( _("Need to specify a port with -whitebind: '%s'"), strBind)); } fBound |= Bind(connman, addrBind, (BF_EXPLICIT | BF_REPORT_ERROR | BF_WHITELIST)); } } if (!gArgs.IsArgSet("-bind") && !gArgs.IsArgSet("-whitebind")) { struct in_addr inaddr_any; inaddr_any.s_addr = INADDR_ANY; fBound |= Bind(connman, CService(in6addr_any, GetListenPort()), BF_NONE); fBound |= Bind(connman, CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE); } if (!fBound) { return InitError(_("Failed to listen on any port. 
Use -listen=0 if " "you want this.")); } } if (gArgs.IsArgSet("-externalip")) { for (const std::string &strAddr : gArgs.GetArgs("-externalip")) { CService addrLocal; if (Lookup(strAddr.c_str(), addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid()) { AddLocal(addrLocal, LOCAL_MANUAL); } else { return InitError(ResolveErrMsg("externalip", strAddr)); } } } if (gArgs.IsArgSet("-seednode")) { for (const std::string &strDest : gArgs.GetArgs("-seednode")) { connman.AddOneShot(strDest); } } #if ENABLE_ZMQ pzmqNotificationInterface = CZMQNotificationInterface::Create(); if (pzmqNotificationInterface) { RegisterValidationInterface(pzmqNotificationInterface); } #endif // unlimited unless -maxuploadtarget is set uint64_t nMaxOutboundLimit = 0; uint64_t nMaxOutboundTimeframe = MAX_UPLOAD_TIMEFRAME; if (gArgs.IsArgSet("-maxuploadtarget")) { nMaxOutboundLimit = gArgs.GetArg("-maxuploadtarget", DEFAULT_MAX_UPLOAD_TARGET) * 1024 * 1024; } // Step 7: load block chain fReindex = gArgs.GetBoolArg("-reindex", false); bool fReindexChainState = gArgs.GetBoolArg("-reindex-chainstate", false); // cache size calculations int64_t nTotalCache = (gArgs.GetArg("-dbcache", nDefaultDbCache) << 20); // total cache cannot be less than nMinDbCache nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be greater than nMaxDbcache nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); int64_t nBlockTreeDBCache = nTotalCache / 8; nBlockTreeDBCache = std::min(nBlockTreeDBCache, (gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX) ? nMaxBlockDBAndTxIndexCache : nMaxBlockDBCache) << 20); nTotalCache -= nBlockTreeDBCache; // use 25%-50% of the remainder for disk cache int64_t nCoinDBCache = std::min(nTotalCache / 2, (nTotalCache / 4) + (1 << 23)); // cap total coins db cache nCoinDBCache = std::min(nCoinDBCache, nMaxCoinsDBCache << 20); nTotalCache -= nCoinDBCache; // the rest goes to in-memory cache nCoinCacheUsage = nTotalCache; int64_t nMempoolSizeMax = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; LogPrintf("Cache configuration:\n"); LogPrintf("* Using %.1fMiB for block index database\n", nBlockTreeDBCache * (1.0 / 1024 / 1024)); LogPrintf("* Using %.1fMiB for chain state database\n", nCoinDBCache * (1.0 / 1024 / 1024)); LogPrintf("* Using %.1fMiB for in-memory UTXO set (plus up to %.1fMiB of " "unused mempool space)\n", nCoinCacheUsage * (1.0 / 1024 / 1024), nMempoolSizeMax * (1.0 / 1024 / 1024)); bool fLoaded = false; while (!fLoaded && !fRequestShutdown) { bool fReset = fReindex; std::string strLoadError; uiInterface.InitMessage(_("Loading block index...")); nStart = GetTimeMillis(); do { try { UnloadBlockIndex(); delete pcoinsTip; delete pcoinsdbview; delete pcoinscatcher; delete pblocktree; pblocktree = new CBlockTreeDB(nBlockTreeDBCache, false, fReindex); pcoinsdbview = new CCoinsViewDB(nCoinDBCache, false, fReindex || fReindexChainState); pcoinscatcher = new CCoinsViewErrorCatcher(pcoinsdbview); if (fReindex) { pblocktree->WriteReindexing(true); // If we're reindexing in prune mode, wipe away unusable // block files and all undo data files if (fPruneMode) { CleanupBlockRevFiles(); } } else if (!pcoinsdbview->Upgrade()) { strLoadError = _("Error upgrading chainstate database"); break; } if (fRequestShutdown) break; if (!LoadBlockIndex(chainparams)) { strLoadError = _("Error loading block database"); break; } // If the loaded chain has a wrong genesis, bail out immediately // (we're likely using a testnet datadir, or the other way // around). 
if (!mapBlockIndex.empty() && mapBlockIndex.count( chainparams.GetConsensus().hashGenesisBlock) == 0) { return InitError(_("Incorrect or no genesis block found. " "Wrong datadir for network?")); } // Initialize the block index (no-op if non-empty database was // already loaded) if (!InitBlockIndex(config)) { strLoadError = _("Error initializing block database"); break; } // Check for changed -txindex state if (fTxIndex != gArgs.GetBoolArg("-txindex", DEFAULT_TXINDEX)) { strLoadError = _("You need to rebuild the database using " "-reindex-chainstate to change -txindex"); break; } // Check for changed -prune state. What we are concerned about // is a user who has pruned blocks in the past, but is now // trying to run unpruned. if (fHavePruned && !fPruneMode) { strLoadError = _("You need to rebuild the database using -reindex to " "go back to unpruned mode. This will redownload the " "entire blockchain"); break; } if (!ReplayBlocks(config, pcoinsdbview)) { strLoadError = _("Unable to replay blocks. You will need to rebuild " "the database using -reindex-chainstate."); break; } pcoinsTip = new CCoinsViewCache(pcoinscatcher); LoadChainTip(chainparams); if (!fReindex && chainActive.Tip() != nullptr) { uiInterface.InitMessage(_("Rewinding blocks...")); if (!RewindBlockIndex(config)) { strLoadError = _("Unable to rewind the database to a " "pre-fork state. You will need to " "redownload the blockchain"); break; } } uiInterface.InitMessage(_("Verifying blocks...")); if (fHavePruned && gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS) > MIN_BLOCKS_TO_KEEP) { LogPrintf("Prune: pruned datadir may not have more than %d " "blocks; only checking available blocks", MIN_BLOCKS_TO_KEEP); } { LOCK(cs_main); CBlockIndex *tip = chainActive.Tip(); RPCNotifyBlockChange(true, tip); if (tip && tip->nTime > GetAdjustedTime() + MAX_FUTURE_BLOCK_TIME) { strLoadError = _("The block database contains a block which " "appears to be from the future. " "This may be due to your computer's date and " "time being set incorrectly. " "Only rebuild the block database if you are sure " "that your computer's date and time are correct"); break; } } if (!CVerifyDB().VerifyDB( config, pcoinsdbview, gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL), gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS))) { strLoadError = _("Corrupted block database detected"); break; } } catch (const std::exception &e) { LogPrintf("%s\n", e.what()); strLoadError = _("Error opening block database"); break; } fLoaded = true; } while (false); if (!fLoaded && !fRequestShutdown) { // first suggest a reindex if (!fReset) { bool fRet = uiInterface.ThreadSafeQuestion( strLoadError + ".\n\n" + _("Do you want to rebuild the block database now?"), strLoadError + ".\nPlease restart with -reindex or " "-reindex-chainstate to recover.", "", CClientUIInterface::MSG_ERROR | CClientUIInterface::BTN_ABORT); if (fRet) { fReindex = true; fRequestShutdown = false; } else { LogPrintf("Aborted block database rebuild. Exiting.\n"); return false; } } else { return InitError(strLoadError); } } } // As LoadBlockIndex can take several minutes, it's possible the user // requested to kill the GUI during the last operation. If so, exit. // As the program has not fully started yet, Shutdown() is possibly // overkill. if (fRequestShutdown) { LogPrintf("Shutdown requested. 
Exiting.\n"); return false; } LogPrintf(" block index %15dms\n", GetTimeMillis() - nStart); fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME; CAutoFile est_filein(fsbridge::fopen(est_path, "rb"), SER_DISK, CLIENT_VERSION); // Allowed to fail as this file IS missing on first startup. if (!est_filein.IsNull()) mempool.ReadFeeEstimates(est_filein); fFeeEstimatesInitialized = true; // Encoded addresses using cashaddr instead of base58 // Activates by default on Jan, 14 config.SetCashAddrEncoding( gArgs.GetBoolArg("-usecashaddr", GetAdjustedTime() > 1515900000)); // Step 8: load wallet #ifdef ENABLE_WALLET if (!CWallet::InitLoadWallet()) return false; #else LogPrintf("No wallet support compiled in!\n"); #endif // Step 9: data directory maintenance // if pruning, unset the service bit and perform the initial blockstore // prune after any wallet rescanning has taken place. if (fPruneMode) { LogPrintf("Unsetting NODE_NETWORK on prune mode\n"); nLocalServices = ServiceFlags(nLocalServices & ~NODE_NETWORK); if (!fReindex) { uiInterface.InitMessage(_("Pruning blockstore...")); PruneAndFlush(); } } // Step 10: import blocks if (!CheckDiskSpace()) { return false; } // Either install a handler to notify us when genesis activates, or set // fHaveGenesis directly. // No locking, as this happens before any background thread is started. if (chainActive.Tip() == nullptr) { uiInterface.NotifyBlockTip.connect(BlockNotifyGenesisWait); } else { fHaveGenesis = true; } if (gArgs.IsArgSet("-blocknotify")) { uiInterface.NotifyBlockTip.connect(BlockNotifyCallback); } std::vector vImportFiles; if (gArgs.IsArgSet("-loadblock")) { for (const std::string &strFile : gArgs.GetArgs("-loadblock")) { vImportFiles.push_back(strFile); } } threadGroup.create_thread( boost::bind(&ThreadImport, std::ref(config), vImportFiles)); // Wait for genesis block to be processed { boost::unique_lock lock(cs_GenesisWait); while (!fHaveGenesis) { condvar_GenesisWait.wait(lock); } uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait); } // Step 11: start node //// debug print LogPrintf("mapBlockIndex.size() = %u\n", mapBlockIndex.size()); LogPrintf("nBestHeight = %d\n", chainActive.Height()); if (gArgs.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) { StartTorControl(threadGroup, scheduler); } Discover(threadGroup); // Map ports with UPnP MapPort(gArgs.GetBoolArg("-upnp", DEFAULT_UPNP)); std::string strNodeError; CConnman::Options connOptions; connOptions.nLocalServices = nLocalServices; connOptions.nRelevantServices = nRelevantServices; connOptions.nMaxConnections = nMaxConnections; connOptions.nMaxOutbound = std::min(MAX_OUTBOUND_CONNECTIONS, connOptions.nMaxConnections); connOptions.nMaxAddnode = MAX_ADDNODE_CONNECTIONS; connOptions.nMaxFeeler = 1; connOptions.nBestHeight = chainActive.Height(); connOptions.uiInterface = &uiInterface; connOptions.nSendBufferMaxSize = 1000 * gArgs.GetArg("-maxsendbuffer", DEFAULT_MAXSENDBUFFER); connOptions.nReceiveFloodSize = 1000 * gArgs.GetArg("-maxreceivebuffer", DEFAULT_MAXRECEIVEBUFFER); connOptions.nMaxOutboundTimeframe = nMaxOutboundTimeframe; connOptions.nMaxOutboundLimit = nMaxOutboundLimit; if (!connman.Start(scheduler, strNodeError, connOptions)) { return InitError(strNodeError); } // Step 12: finished SetRPCWarmupFinished(); uiInterface.InitMessage(_("Done loading")); #ifdef ENABLE_WALLET for (CWalletRef pwallet : vpwallets) { pwallet->postInitProcess(scheduler); } #endif return !fRequestShutdown; } diff --git a/src/net.cpp b/src/net.cpp index 86e7154ec..bcf6078b7 100644 --- 
a/src/net.cpp +++ b/src/net.cpp @@ -1,3053 +1,3058 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include "config/bitcoin-config.h" #endif #include "net.h" #include "addrman.h" #include "chainparams.h" #include "clientversion.h" #include "config.h" #include "consensus/consensus.h" #include "crypto/common.h" #include "crypto/sha256.h" #include "hash.h" #include "netbase.h" #include "primitives/transaction.h" #include "scheduler.h" #include "ui_interface.h" #include "utilstrencodings.h" #ifdef WIN32 #include <string.h> #else #include <fcntl.h> #endif #ifdef USE_UPNP #include <miniupnpc/miniupnpc.h> #include <miniupnpc/miniwget.h> #include <miniupnpc/upnpcommands.h> #include <miniupnpc/upnperrors.h> #endif #include <math.h> // Dump addresses to peers.dat and banlist.dat every 15 minutes (900s) #define DUMP_ADDRESSES_INTERVAL 900 // We add a random period of time (0 to 1 seconds) to feeler connections to prevent // synchronization. #define FEELER_SLEEP_WINDOW 1 #if !defined(HAVE_MSG_NOSIGNAL) && !defined(MSG_NOSIGNAL) #define MSG_NOSIGNAL 0 #endif // Fix for ancient MinGW versions that don't have these defined in ws2tcpip.h. // Todo: Can be removed when our pull-tester is upgraded to a modern MinGW // version. #ifdef WIN32 #ifndef PROTECTION_LEVEL_UNRESTRICTED #define PROTECTION_LEVEL_UNRESTRICTED 10 #endif #ifndef IPV6_PROTECTION_LEVEL #define IPV6_PROTECTION_LEVEL 23 #endif #endif static const std::string NET_MESSAGE_COMMAND_OTHER = "*other*"; // SHA256("netgroup")[0:8] static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("localhostnonce")[0:8] static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // // Global state variables // bool fDiscover = true; bool fListen = true; bool fRelayTxes = true; CCriticalSection cs_mapLocalHost; std::map<CNetAddr, LocalServiceInfo> mapLocalHost; static bool vfLimited[NET_MAX] = {}; limitedmap<uint256, int64_t> mapAlreadyAskedFor(MAX_INV_SZ); // Signals for message handling static CNodeSignals g_signals; CNodeSignals &GetNodeSignals() { return g_signals; } void CConnman::AddOneShot(const std::string &strDest) { LOCK(cs_vOneShots); vOneShots.push_back(strDest); } unsigned short GetListenPort() { return (unsigned short)(gArgs.GetArg("-port", Params().GetDefaultPort())); } // find 'best' local address for a particular peer bool GetLocal(CService &addr, const CNetAddr *paddrPeer) { if (!fListen) return false; int nBestScore = -1; int nBestReachability = -1; { LOCK(cs_mapLocalHost); for (std::map<CNetAddr, LocalServiceInfo>::iterator it = mapLocalHost.begin(); it != mapLocalHost.end(); it++) { int nScore = (*it).second.nScore; int nReachability = (*it).first.GetReachabilityFrom(paddrPeer); if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) { addr = CService((*it).first, (*it).second.nPort); nBestReachability = nReachability; nBestScore = nScore; } } } return nBestScore >= 0; } //! Convert the pnSeeds6 array into usable address objects. static std::vector<CAddress> convertSeed6(const std::vector<SeedSpec6> &vSeedsIn) { // It'll only connect to one or two seed nodes because once it connects, // it'll get a pile of addresses with newer timestamps. Seed nodes are given // a random 'last seen time' of between one and two weeks ago.
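// Concretely, addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek below
// stamps each fixed seed with a uniformly random "last seen" time between one
// and two weeks in the past.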
const int64_t nOneWeek = 7 * 24 * 60 * 60; std::vector vSeedsOut; vSeedsOut.reserve(vSeedsIn.size()); for (std::vector::const_iterator i(vSeedsIn.begin()); i != vSeedsIn.end(); ++i) { struct in6_addr ip; memcpy(&ip, i->addr, sizeof(ip)); CAddress addr(CService(ip, i->port), NODE_NETWORK); addr.nTime = GetTime() - GetRand(nOneWeek) - nOneWeek; vSeedsOut.push_back(addr); } return vSeedsOut; } // Get best local address for a particular peer as a CAddress. Otherwise, return // the unroutable 0.0.0.0 but filled in with the normal parameters, since the IP // may be changed to a useful one by discovery. CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices) { CAddress ret(CService(CNetAddr(), GetListenPort()), NODE_NONE); CService addr; if (GetLocal(addr, paddrPeer)) { ret = CAddress(addr, nLocalServices); } ret.nTime = GetAdjustedTime(); return ret; } int GetnScore(const CService &addr) { LOCK(cs_mapLocalHost); if (mapLocalHost.count(addr) == LOCAL_NONE) { return 0; } return mapLocalHost[addr].nScore; } // Is our peer's addrLocal potentially useful as an external IP source? bool IsPeerAddrLocalGood(CNode *pnode) { CService addrLocal = pnode->GetAddrLocal(); return fDiscover && pnode->addr.IsRoutable() && addrLocal.IsRoutable() && !IsLimited(addrLocal.GetNetwork()); } // Pushes our own address to a peer. void AdvertiseLocal(CNode *pnode) { if (fListen && pnode->fSuccessfullyConnected) { CAddress addrLocal = GetLocalAddress(&pnode->addr, pnode->GetLocalServices()); // If discovery is enabled, sometimes give our peer the address it tells // us that it sees us as in case it has a better idea of our address // than we do. if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() || GetRand((GetnScore(addrLocal) > LOCAL_MANUAL) ? 8 : 2) == 0)) { addrLocal.SetIP(pnode->GetAddrLocal()); } if (addrLocal.IsRoutable()) { LogPrint(BCLog::NET, "AdvertiseLocal: advertising address %s\n", addrLocal.ToString()); FastRandomContext insecure_rand; pnode->PushAddress(addrLocal, insecure_rand); } } } // Learn a new local address. bool AddLocal(const CService &addr, int nScore) { if (!addr.IsRoutable()) { return false; } if (!fDiscover && nScore < LOCAL_MANUAL) { return false; } if (IsLimited(addr)) { return false; } LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore); { LOCK(cs_mapLocalHost); bool fAlready = mapLocalHost.count(addr) > 0; LocalServiceInfo &info = mapLocalHost[addr]; if (!fAlready || nScore >= info.nScore) { info.nScore = nScore + (fAlready ? 
1 : 0); info.nPort = addr.GetPort(); } } return true; } bool AddLocal(const CNetAddr &addr, int nScore) { return AddLocal(CService(addr, GetListenPort()), nScore); } bool RemoveLocal(const CService &addr) { LOCK(cs_mapLocalHost); LogPrintf("RemoveLocal(%s)\n", addr.ToString()); mapLocalHost.erase(addr); return true; } /** Make a particular network entirely off-limits (no automatic connects to it) */ void SetLimited(enum Network net, bool fLimited) { if (net == NET_UNROUTABLE) { return; } LOCK(cs_mapLocalHost); vfLimited[net] = fLimited; } bool IsLimited(enum Network net) { LOCK(cs_mapLocalHost); return vfLimited[net]; } bool IsLimited(const CNetAddr &addr) { return IsLimited(addr.GetNetwork()); } /** vote for a local address */ bool SeenLocal(const CService &addr) { LOCK(cs_mapLocalHost); if (mapLocalHost.count(addr) == 0) { return false; } mapLocalHost[addr].nScore++; return true; } /** check whether a given address is potentially local */ bool IsLocal(const CService &addr) { LOCK(cs_mapLocalHost); return mapLocalHost.count(addr) > 0; } /** check whether a given network is one we can probably connect to */ bool IsReachable(enum Network net) { LOCK(cs_mapLocalHost); return !vfLimited[net]; } /** check whether a given address is in a network we can probably connect to */ bool IsReachable(const CNetAddr &addr) { enum Network net = addr.GetNetwork(); return IsReachable(net); } CNode *CConnman::FindNode(const CNetAddr &ip) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if ((CNetAddr)pnode->addr == ip) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const CSubNet &subNet) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (subNet.Match((CNetAddr)pnode->addr)) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const std::string &addrName) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (pnode->GetAddrName() == addrName) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const CService &addr) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if ((CService)pnode->addr == addr) { return pnode; } } return nullptr; } bool CConnman::CheckIncomingNonce(uint64_t nonce) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (!pnode->fSuccessfullyConnected && !pnode->fInbound && pnode->GetLocalNonce() == nonce) return false; } return true; } CNode *CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure) { if (pszDest == nullptr) { if (IsLocal(addrConnect)) { return nullptr; } // Look for an existing connection CNode *pnode = FindNode((CService)addrConnect); if (pnode) { LogPrintf("Failed to open new connection, already connected\n"); return nullptr; } } /// debug print LogPrint(BCLog::NET, "trying connection %s lastseen=%.1fhrs\n", pszDest ? pszDest : addrConnect.ToString(), pszDest ? 0.0 : (double)(GetAdjustedTime() - addrConnect.nTime) / 3600.0); // Connect SOCKET hSocket; bool proxyConnectionFailed = false; if (pszDest ? ConnectSocketByName(addrConnect, hSocket, pszDest, - Params().GetDefaultPort(), + config->GetChainParams().GetDefaultPort(), nConnectTimeout, &proxyConnectionFailed) : ConnectSocket(addrConnect, hSocket, nConnectTimeout, &proxyConnectionFailed)) { if (!IsSelectableSocket(hSocket)) { LogPrintf("Cannot create connection: non-selectable socket created " "(fd >= FD_SETSIZE ?)\n"); CloseSocket(hSocket); return nullptr; } if (pszDest && addrConnect.IsValid()) { // It is possible that we already have a connection to the IP/port // pszDest resolved to. 
In that case, drop the connection that was // just created, and return the existing CNode instead. Also store // the name we used to connect in that CNode, so that future // FindNode() calls to that name catch this early. LOCK(cs_vNodes); CNode *pnode = FindNode((CService)addrConnect); if (pnode) { pnode->MaybeSetAddrName(std::string(pszDest)); CloseSocket(hSocket); LogPrintf("Failed to open new connection, already connected\n"); return nullptr; } } addrman.Attempt(addrConnect, fCountFailure); // Add node NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE) .Write(id) .Finalize(); CNode *pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, pszDest ? pszDest : "", false); pnode->nServicesExpected = ServiceFlags(addrConnect.nServices & nRelevantServices); pnode->AddRef(); return pnode; } else if (!proxyConnectionFailed) { // If connecting to the node failed, and failure is not caused by a // problem connecting to the proxy, mark this as an attempt. addrman.Attempt(addrConnect, fCountFailure); } return nullptr; } void CConnman::DumpBanlist() { // Clean unused entries (if bantime has expired) SweepBanned(); if (!BannedSetIsDirty()) { return; } int64_t nStart = GetTimeMillis(); CBanDB bandb(config->GetChainParams()); banmap_t banmap; GetBanned(banmap); if (bandb.Write(banmap)) { SetBannedSetDirty(false); } LogPrint(BCLog::NET, "Flushed %d banned node ips/subnets to banlist.dat %dms\n", banmap.size(), GetTimeMillis() - nStart); } void CNode::CloseSocketDisconnect() { fDisconnect = true; LOCK(cs_hSocket); if (hSocket != INVALID_SOCKET) { LogPrint(BCLog::NET, "disconnecting peer=%d\n", id); CloseSocket(hSocket); } } void CConnman::ClearBanned() { { LOCK(cs_setBanned); setBanned.clear(); setBannedIsDirty = true; } // Store banlist to disk. DumpBanlist(); if (clientInterface) { clientInterface->BannedListChanged(); } } bool CConnman::IsBanned(CNetAddr ip) { LOCK(cs_setBanned); bool fResult = false; for (banmap_t::iterator it = setBanned.begin(); it != setBanned.end(); it++) { CSubNet subNet = (*it).first; CBanEntry banEntry = (*it).second; if (subNet.Match(ip) && GetTime() < banEntry.nBanUntil) { fResult = true; } } return fResult; } bool CConnman::IsBanned(CSubNet subnet) { LOCK(cs_setBanned); bool fResult = false; banmap_t::iterator i = setBanned.find(subnet); if (i != setBanned.end()) { CBanEntry banEntry = (*i).second; if (GetTime() < banEntry.nBanUntil) { fResult = true; } } return fResult; } void CConnman::Ban(const CNetAddr &addr, const BanReason &banReason, int64_t bantimeoffset, bool sinceUnixEpoch) { CSubNet subNet(addr); Ban(subNet, banReason, bantimeoffset, sinceUnixEpoch); } void CConnman::Ban(const CSubNet &subNet, const BanReason &banReason, int64_t bantimeoffset, bool sinceUnixEpoch) { CBanEntry banEntry(GetTime()); banEntry.banReason = banReason; if (bantimeoffset <= 0) { bantimeoffset = gArgs.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME); sinceUnixEpoch = false; } banEntry.nBanUntil = (sinceUnixEpoch ? 
0 : GetTime()) + bantimeoffset; { LOCK(cs_setBanned); if (setBanned[subNet].nBanUntil < banEntry.nBanUntil) { setBanned[subNet] = banEntry; setBannedIsDirty = true; } else { return; } } if (clientInterface) { clientInterface->BannedListChanged(); } { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (subNet.Match((CNetAddr)pnode->addr)) { pnode->fDisconnect = true; } } } if (banReason == BanReasonManuallyAdded) { // Store banlist to disk immediately if user requested ban. DumpBanlist(); } } bool CConnman::Unban(const CNetAddr &addr) { CSubNet subNet(addr); return Unban(subNet); } bool CConnman::Unban(const CSubNet &subNet) { { LOCK(cs_setBanned); if (!setBanned.erase(subNet)) { return false; } setBannedIsDirty = true; } if (clientInterface) { clientInterface->BannedListChanged(); } // Store banlist to disk immediately. DumpBanlist(); return true; } void CConnman::GetBanned(banmap_t &banMap) { LOCK(cs_setBanned); // Sweep the banlist so expired bans are not returned SweepBanned(); // Create a thread safe copy. banMap = setBanned; } void CConnman::SetBanned(const banmap_t &banMap) { LOCK(cs_setBanned); setBanned = banMap; setBannedIsDirty = true; } void CConnman::SweepBanned() { int64_t now = GetTime(); LOCK(cs_setBanned); banmap_t::iterator it = setBanned.begin(); while (it != setBanned.end()) { CSubNet subNet = (*it).first; CBanEntry banEntry = (*it).second; if (now > banEntry.nBanUntil) { setBanned.erase(it++); setBannedIsDirty = true; LogPrint(BCLog::NET, "%s: Removed banned node ip/subnet from banlist.dat: %s\n", __func__, subNet.ToString()); } else { ++it; } } } bool CConnman::BannedSetIsDirty() { LOCK(cs_setBanned); return setBannedIsDirty; } void CConnman::SetBannedSetDirty(bool dirty) { // Reuse setBanned lock for the isDirty flag. LOCK(cs_setBanned); setBannedIsDirty = dirty; } bool CConnman::IsWhitelistedRange(const CNetAddr &addr) { LOCK(cs_vWhitelistedRange); for (const CSubNet &subnet : vWhitelistedRange) { if (subnet.Match(addr)) { return true; } } return false; } void CConnman::AddWhitelistedRange(const CSubNet &subnet) { LOCK(cs_vWhitelistedRange); vWhitelistedRange.push_back(subnet); } std::string CNode::GetAddrName() const { LOCK(cs_addrName); return addrName; } void CNode::MaybeSetAddrName(const std::string &addrNameIn) { LOCK(cs_addrName); if (addrName.empty()) { addrName = addrNameIn; } } CService CNode::GetAddrLocal() const { LOCK(cs_addrLocal); return addrLocal; } void CNode::SetAddrLocal(const CService &addrLocalIn) { LOCK(cs_addrLocal); if (addrLocal.IsValid()) { error("Addr local already set for node: %i. Refusing to change from %s " "to %s", id, addrLocal.ToString(), addrLocalIn.ToString()); } else { addrLocal = addrLocalIn; } } #undef X #define X(name) stats.name = name void CNode::copyStats(CNodeStats &stats) { stats.nodeid = this->GetId(); X(nServices); X(addr); { LOCK(cs_filter); X(fRelayTxes); } X(nLastSend); X(nLastRecv); X(nTimeConnected); X(nTimeOffset); stats.addrName = GetAddrName(); X(nVersion); { LOCK(cs_SubVer); X(cleanSubVer); } X(fInbound); X(fAddnode); X(nStartingHeight); { LOCK(cs_vSend); X(mapSendBytesPerMsgCmd); X(nSendBytes); } { LOCK(cs_vRecv); X(mapRecvBytesPerMsgCmd); X(nRecvBytes); } X(fWhitelisted); // It is common for nodes with good ping times to suddenly become lagged, // due to a new block arriving or other large transfer. Merely reporting // pingtime might fool the caller into thinking the node was still // responsive, since pingtime does not update until the ping is complete, // which might take a while. 
So, if a ping is taking an unusually long time // in flight, the caller can immediately detect that this is happening. int64_t nPingUsecWait = 0; if ((0 != nPingNonceSent) && (0 != nPingUsecStart)) { nPingUsecWait = GetTimeMicros() - nPingUsecStart; } // Raw ping time is in microseconds, but show it to user as whole seconds // (Bitcoin users should be well used to small numbers with many decimal // places by now :) stats.dPingTime = ((double(nPingUsecTime)) / 1e6); stats.dMinPing = ((double(nMinPingUsecTime)) / 1e6); stats.dPingWait = ((double(nPingUsecWait)) / 1e6); // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : ""; } #undef X bool CNode::ReceiveMsgBytes(const char *pch, unsigned int nBytes, bool &complete) { complete = false; int64_t nTimeMicros = GetTimeMicros(); LOCK(cs_vRecv); nLastRecv = nTimeMicros / 1000000; nRecvBytes += nBytes; while (nBytes > 0) { // Get current incomplete message, or create a new one. if (vRecvMsg.empty() || vRecvMsg.back().complete()) { vRecvMsg.push_back(CNetMessage(Params().NetMagic(), SER_NETWORK, INIT_PROTO_VERSION)); } CNetMessage &msg = vRecvMsg.back(); // Absorb network data. int handled; if (!msg.in_data) { handled = msg.readHeader(pch, nBytes); } else { handled = msg.readData(pch, nBytes); } if (handled < 0) { return false; } if (msg.in_data && msg.hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) { LogPrint(BCLog::NET, "Oversized message from peer=%i, disconnecting\n", GetId()); return false; } pch += handled; nBytes -= handled; if (msg.complete()) { // Store received bytes per message command to prevent a memory DOS, // only allow valid commands. mapMsgCmdSize::iterator i = mapRecvBytesPerMsgCmd.find(msg.hdr.pchCommand); if (i == mapRecvBytesPerMsgCmd.end()) { i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER); } assert(i != mapRecvBytesPerMsgCmd.end()); i->second += msg.hdr.nMessageSize + CMessageHeader::HEADER_SIZE; msg.nTime = nTimeMicros; complete = true; } } return true; } void CNode::SetSendVersion(int nVersionIn) { // Send version may only be changed in the version message, and only one // version message is allowed per session. We can therefore treat this value // as const and even atomic as long as it's only used once a version message // has been successfully processed. Any attempt to set this twice is an // error. if (nSendVersion != 0) { error("Send version already set for node: %i. Refusing to change from " "%i to %i", id, nSendVersion, nVersionIn); } else { nSendVersion = nVersionIn; } } int CNode::GetSendVersion() const { // The send version should always be explicitly set to INIT_PROTO_VERSION // rather than using this value until SetSendVersion has been called. if (nSendVersion == 0) { error("Requesting unset send version for node: %i. 
Using %i", id, INIT_PROTO_VERSION); return INIT_PROTO_VERSION; } return nSendVersion; } int CNetMessage::readHeader(const char *pch, unsigned int nBytes) { // copy data to temporary parsing buffer unsigned int nRemaining = 24 - nHdrPos; unsigned int nCopy = std::min(nRemaining, nBytes); memcpy(&hdrbuf[nHdrPos], pch, nCopy); nHdrPos += nCopy; // if header incomplete, exit if (nHdrPos < 24) { return nCopy; } // deserialize to CMessageHeader try { hdrbuf >> hdr; } catch (const std::exception &) { return -1; } // reject messages larger than MAX_SIZE if (hdr.nMessageSize > MAX_SIZE) { return -1; } // switch state to reading message data in_data = true; return nCopy; } int CNetMessage::readData(const char *pch, unsigned int nBytes) { unsigned int nRemaining = hdr.nMessageSize - nDataPos; unsigned int nCopy = std::min(nRemaining, nBytes); if (vRecv.size() < nDataPos + nCopy) { // Allocate up to 256 KiB ahead, but never more than the total message // size. vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); } hasher.Write((const uint8_t *)pch, nCopy); memcpy(&vRecv[nDataPos], pch, nCopy); nDataPos += nCopy; return nCopy; } const uint256 &CNetMessage::GetMessageHash() const { assert(complete()); if (data_hash.IsNull()) { hasher.Finalize(data_hash.begin()); } return data_hash; } // requires LOCK(cs_vSend) size_t CConnman::SocketSendData(CNode *pnode) const { AssertLockHeld(pnode->cs_vSend); size_t nSentSize = 0; size_t nMsgCount = 0; for (const auto &data : pnode->vSendMsg) { assert(data.size() > pnode->nSendOffset); int nBytes = 0; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { break; } nBytes = send(pnode->hSocket, reinterpret_cast(data.data()) + pnode->nSendOffset, data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT); } if (nBytes == 0) { // couldn't send anything at all break; } if (nBytes < 0) { // error int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { LogPrintf("socket send error %s\n", NetworkErrorString(nErr)); pnode->CloseSocketDisconnect(); } break; } assert(nBytes > 0); pnode->nLastSend = GetSystemTimeInSeconds(); pnode->nSendBytes += nBytes; pnode->nSendOffset += nBytes; nSentSize += nBytes; if (pnode->nSendOffset != data.size()) { // could not send full message; stop sending more break; } pnode->nSendOffset = 0; pnode->nSendSize -= data.size(); pnode->fPauseSend = pnode->nSendSize > nSendBufferMaxSize; nMsgCount++; } pnode->vSendMsg.erase(pnode->vSendMsg.begin(), pnode->vSendMsg.begin() + nMsgCount); if (pnode->vSendMsg.empty()) { assert(pnode->nSendOffset == 0); assert(pnode->nSendSize == 0); } return nSentSize; } struct NodeEvictionCandidate { NodeId id; int64_t nTimeConnected; int64_t nMinPingUsecTime; int64_t nLastBlockTime; int64_t nLastTXTime; bool fRelevantServices; bool fRelayTxes; bool fBloomFilter; CAddress addr; uint64_t nKeyedNetGroup; }; static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nMinPingUsecTime > b.nMinPingUsecTime; } static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nTimeConnected > b.nTimeConnected; } static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nKeyedNetGroup < b.nKeyedNetGroup; } static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { // There is a fall-through here because it is common for a 
node to have many // peers which have not yet relayed a block. if (a.nLastBlockTime != b.nLastBlockTime) { return a.nLastBlockTime < b.nLastBlockTime; } if (a.fRelevantServices != b.fRelevantServices) { return b.fRelevantServices; } return a.nTimeConnected > b.nTimeConnected; } static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { // There is a fall-through here because it is common for a node to have more // than a few peers that have not yet relayed txn. if (a.nLastTXTime != b.nLastTXTime) { return a.nLastTXTime < b.nLastTXTime; } if (a.fRelayTxes != b.fRelayTxes) { return b.fRelayTxes; } if (a.fBloomFilter != b.fBloomFilter) { return a.fBloomFilter; } return a.nTimeConnected > b.nTimeConnected; } /** * Try to find a connection to evict when the node is full. Extreme care must be * taken to avoid opening the node to attacker triggered network partitioning. * The strategy used here is to protect a small number of peers for each of * several distinct characteristics which are difficult to forge. In order to * partition a node the attacker must be simultaneously better at all of them * than honest peers. */ bool CConnman::AttemptToEvictConnection() { std::vector vEvictionCandidates; { LOCK(cs_vNodes); for (CNode *node : vNodes) { if (node->fWhitelisted || !node->fInbound || node->fDisconnect) { continue; } NodeEvictionCandidate candidate = { node->id, node->nTimeConnected, node->nMinPingUsecTime, node->nLastBlockTime, node->nLastTXTime, (node->nServices & nRelevantServices) == nRelevantServices, node->fRelayTxes, node->pfilter != nullptr, node->addr, node->nKeyedNetGroup}; vEvictionCandidates.push_back(candidate); } } if (vEvictionCandidates.empty()) { return false; } // Protect connections with certain characteristics // Deterministically select 4 peers to protect by netgroup. An attacker // cannot predict which netgroups will be protected. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNetGroupKeyed); vEvictionCandidates.erase( vEvictionCandidates.end() - std::min(4, static_cast(vEvictionCandidates.size())), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Protect the 8 nodes with the lowest minimum ping time. An attacker cannot // manipulate this metric without physically moving nodes closer to the // target. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), ReverseCompareNodeMinPingTime); vEvictionCandidates.erase( vEvictionCandidates.end() - std::min(8, static_cast(vEvictionCandidates.size())), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Protect 4 nodes that most recently sent us transactions. An attacker // cannot manipulate this metric without performing useful work. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeTXTime); vEvictionCandidates.erase( vEvictionCandidates.end() - std::min(4, static_cast(vEvictionCandidates.size())), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Protect 4 nodes that most recently sent us blocks. An attacker cannot // manipulate this metric without performing useful work. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), CompareNodeBlockTime); vEvictionCandidates.erase( vEvictionCandidates.end() - std::min(4, static_cast(vEvictionCandidates.size())), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Protect the half of the remaining nodes which have been connected the // longest. 
This replicates the non-eviction implicit behavior, and // precludes attacks that start later. std::sort(vEvictionCandidates.begin(), vEvictionCandidates.end(), ReverseCompareNodeTimeConnected); vEvictionCandidates.erase( vEvictionCandidates.end() - static_cast(vEvictionCandidates.size() / 2), vEvictionCandidates.end()); if (vEvictionCandidates.empty()) { return false; } // Identify the network group with the most connections and youngest member. // (vEvictionCandidates is already sorted by reverse connect time) uint64_t naMostConnections; unsigned int nMostConnections = 0; int64_t nMostConnectionsTime = 0; std::map> mapNetGroupNodes; for (const NodeEvictionCandidate &node : vEvictionCandidates) { mapNetGroupNodes[node.nKeyedNetGroup].push_back(node); int64_t grouptime = mapNetGroupNodes[node.nKeyedNetGroup][0].nTimeConnected; size_t groupsize = mapNetGroupNodes[node.nKeyedNetGroup].size(); if (groupsize > nMostConnections || (groupsize == nMostConnections && grouptime > nMostConnectionsTime)) { nMostConnections = groupsize; nMostConnectionsTime = grouptime; naMostConnections = node.nKeyedNetGroup; } } // Reduce to the network group with the most connections vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]); // Disconnect from the network group with the most connections NodeId evicted = vEvictionCandidates.front().id; LOCK(cs_vNodes); for (std::vector::const_iterator it(vNodes.begin()); it != vNodes.end(); ++it) { if ((*it)->GetId() == evicted) { (*it)->fDisconnect = true; return true; } } return false; } void CConnman::AcceptConnection(const ListenSocket &hListenSocket) { struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); SOCKET hSocket = accept(hListenSocket.socket, (struct sockaddr *)&sockaddr, &len); CAddress addr; int nInbound = 0; int nMaxInbound = nMaxConnections - (nMaxOutbound + nMaxFeeler); if (hSocket != INVALID_SOCKET) { if (!addr.SetSockAddr((const struct sockaddr *)&sockaddr)) { LogPrintf("Warning: Unknown socket family\n"); } } bool whitelisted = hListenSocket.whitelisted || IsWhitelistedRange(addr); { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (pnode->fInbound) { nInbound++; } } } if (hSocket == INVALID_SOCKET) { int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK) { LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr)); } return; } if (!fNetworkActive) { LogPrintf("connection from %s dropped: not accepting new connections\n", addr.ToString()); CloseSocket(hSocket); return; } if (!IsSelectableSocket(hSocket)) { LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToString()); CloseSocket(hSocket); return; } // According to the internet TCP_NODELAY is not carried into accepted // sockets on all platforms. Set it again here just to be sure. 
int set = 1; #ifdef WIN32 setsockopt(hSocket, IPPROTO_TCP, TCP_NODELAY, (const char *)&set, sizeof(int)); #else setsockopt(hSocket, IPPROTO_TCP, TCP_NODELAY, (void *)&set, sizeof(int)); #endif if (IsBanned(addr) && !whitelisted) { LogPrintf("connection from %s dropped (banned)\n", addr.ToString()); CloseSocket(hSocket); return; } if (nInbound >= nMaxInbound) { if (!AttemptToEvictConnection()) { // No connection to evict, disconnect the new connection LogPrint(BCLog::NET, "failed to find an eviction candidate - " "connection dropped (full)\n"); CloseSocket(hSocket); return; } } NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE) .Write(id) .Finalize(); CNode *pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, "", true); pnode->AddRef(); pnode->fWhitelisted = whitelisted; GetNodeSignals().InitializeNode(*config, pnode, *this); LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString()); { LOCK(cs_vNodes); vNodes.push_back(pnode); } } void CConnman::ThreadSocketHandler() { unsigned int nPrevNodeCount = 0; while (!interruptNet) { // // Disconnect nodes // { LOCK(cs_vNodes); // Disconnect unused nodes std::vector vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { if (pnode->fDisconnect) { // remove from vNodes vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end()); // release outbound grant (if any) pnode->grantOutbound.Release(); // close socket and cleanup pnode->CloseSocketDisconnect(); // hold in disconnected pool until all refs are released pnode->Release(); vNodesDisconnected.push_back(pnode); } } } { // Delete disconnected nodes std::list vNodesDisconnectedCopy = vNodesDisconnected; for (CNode *pnode : vNodesDisconnectedCopy) { // wait until threads are done using it if (pnode->GetRefCount() <= 0) { bool fDelete = false; { TRY_LOCK(pnode->cs_inventory, lockInv); if (lockInv) { TRY_LOCK(pnode->cs_vSend, lockSend); if (lockSend) { fDelete = true; } } } if (fDelete) { vNodesDisconnected.remove(pnode); DeleteNode(pnode); } } } } size_t vNodesSize; { LOCK(cs_vNodes); vNodesSize = vNodes.size(); } if (vNodesSize != nPrevNodeCount) { nPrevNodeCount = vNodesSize; if (clientInterface) { clientInterface->NotifyNumConnectionsChanged(nPrevNodeCount); } } // // Find which sockets have data to receive // struct timeval timeout; timeout.tv_sec = 0; // Frequency to poll pnode->vSend timeout.tv_usec = 50000; fd_set fdsetRecv; fd_set fdsetSend; fd_set fdsetError; FD_ZERO(&fdsetRecv); FD_ZERO(&fdsetSend); FD_ZERO(&fdsetError); SOCKET hSocketMax = 0; bool have_fds = false; for (const ListenSocket &hListenSocket : vhListenSocket) { FD_SET(hListenSocket.socket, &fdsetRecv); hSocketMax = std::max(hSocketMax, hListenSocket.socket); have_fds = true; } { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { // Implement the following logic: // * If there is data to send, select() for sending data. As // this only happens when optimistic write failed, we choose to // first drain the write buffer in this case before receiving // more. This avoids needlessly queueing received data, if the // remote peer is not themselves receiving data. This means // properly utilizing TCP flow control signalling. // * Otherwise, if there is space left in the receive buffer, // select() for receiving data. // * Hand off all complete messages to the processor, to be // handled without blocking here. 
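// In short, per node: pending send data -> select() for writability only
// (drain the send buffer before reading more); otherwise, if receiving is not
// paused -> select() for readability; and always watch the socket for errors.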
bool select_recv = !pnode->fPauseRecv; bool select_send; { LOCK(pnode->cs_vSend); select_send = !pnode->vSendMsg.empty(); } LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } FD_SET(pnode->hSocket, &fdsetError); hSocketMax = std::max(hSocketMax, pnode->hSocket); have_fds = true; if (select_send) { FD_SET(pnode->hSocket, &fdsetSend); continue; } if (select_recv) { FD_SET(pnode->hSocket, &fdsetRecv); } } } int nSelect = select(have_fds ? hSocketMax + 1 : 0, &fdsetRecv, &fdsetSend, &fdsetError, &timeout); if (interruptNet) { return; } if (nSelect == SOCKET_ERROR) { if (have_fds) { int nErr = WSAGetLastError(); LogPrintf("socket select error %s\n", NetworkErrorString(nErr)); for (unsigned int i = 0; i <= hSocketMax; i++) { FD_SET(i, &fdsetRecv); } } FD_ZERO(&fdsetSend); FD_ZERO(&fdsetError); if (!interruptNet.sleep_for( std::chrono::milliseconds(timeout.tv_usec / 1000))) { return; } } // // Accept new connections // for (const ListenSocket &hListenSocket : vhListenSocket) { if (hListenSocket.socket != INVALID_SOCKET && FD_ISSET(hListenSocket.socket, &fdsetRecv)) { AcceptConnection(hListenSocket); } } // // Service each socket // std::vector vNodesCopy; { LOCK(cs_vNodes); vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { pnode->AddRef(); } } for (CNode *pnode : vNodesCopy) { if (interruptNet) { return; } // // Receive // bool recvSet = false; bool sendSet = false; bool errorSet = false; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } recvSet = FD_ISSET(pnode->hSocket, &fdsetRecv); sendSet = FD_ISSET(pnode->hSocket, &fdsetSend); errorSet = FD_ISSET(pnode->hSocket, &fdsetError); } if (recvSet || errorSet) { // typical socket buffer is 8K-64K char pchBuf[0x10000]; int nBytes = 0; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT); } if (nBytes > 0) { bool notify = false; if (!pnode->ReceiveMsgBytes(pchBuf, nBytes, notify)) { pnode->CloseSocketDisconnect(); } RecordBytesRecv(nBytes); if (notify) { size_t nSizeAdded = 0; auto it(pnode->vRecvMsg.begin()); for (; it != pnode->vRecvMsg.end(); ++it) { if (!it->complete()) { break; } nSizeAdded += it->vRecv.size() + CMessageHeader::HEADER_SIZE; } { LOCK(pnode->cs_vProcessMsg); pnode->vProcessMsg.splice( pnode->vProcessMsg.end(), pnode->vRecvMsg, pnode->vRecvMsg.begin(), it); pnode->nProcessQueueSize += nSizeAdded; pnode->fPauseRecv = pnode->nProcessQueueSize > nReceiveFloodSize; } WakeMessageHandler(); } } else if (nBytes == 0) { // socket closed gracefully if (!pnode->fDisconnect) { LogPrint(BCLog::NET, "socket closed\n"); } pnode->CloseSocketDisconnect(); } else if (nBytes < 0) { // error int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { if (!pnode->fDisconnect) { LogPrintf("socket recv error %s\n", NetworkErrorString(nErr)); } pnode->CloseSocketDisconnect(); } } } // // Send // if (sendSet) { LOCK(pnode->cs_vSend); size_t nBytes = SocketSendData(pnode); if (nBytes) { RecordBytesSent(nBytes); } } // // Inactivity checking // int64_t nTime = GetSystemTimeInSeconds(); if (nTime - pnode->nTimeConnected > 60) { if (pnode->nLastRecv == 0 || pnode->nLastSend == 0) { LogPrint(BCLog::NET, "socket no message in first 60 " "seconds, %d %d from %d\n", pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->id); pnode->fDisconnect = true; } else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL) { LogPrintf("socket sending timeout: 
%is\n", nTime - pnode->nLastSend); pnode->fDisconnect = true; } else if (nTime - pnode->nLastRecv > (pnode->nVersion > BIP0031_VERSION ? TIMEOUT_INTERVAL : 90 * 60)) { LogPrintf("socket receive timeout: %is\n", nTime - pnode->nLastRecv); pnode->fDisconnect = true; } else if (pnode->nPingNonceSent && pnode->nPingUsecStart + TIMEOUT_INTERVAL * 1000000 < GetTimeMicros()) { LogPrintf("ping timeout: %fs\n", 0.000001 * (GetTimeMicros() - pnode->nPingUsecStart)); pnode->fDisconnect = true; } else if (!pnode->fSuccessfullyConnected) { LogPrintf("version handshake timeout from %d\n", pnode->id); pnode->fDisconnect = true; } } } { LOCK(cs_vNodes); for (CNode *pnode : vNodesCopy) { pnode->Release(); } } } } void CConnman::WakeMessageHandler() { { std::lock_guard lock(mutexMsgProc); fMsgProcWake = true; } condMsgProc.notify_one(); } #ifdef USE_UPNP void ThreadMapPort() { std::string port = strprintf("%u", GetListenPort()); const char *multicastif = 0; const char *minissdpdpath = 0; struct UPNPDev *devlist = 0; char lanaddr[64]; #ifndef UPNPDISCOVER_SUCCESS /* miniupnpc 1.5 */ devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0); #elif MINIUPNPC_API_VERSION < 14 /* miniupnpc 1.6 */ int error = 0; devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error); #else /* miniupnpc 1.9.20150730 */ int error = 0; devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error); #endif struct UPNPUrls urls; struct IGDdatas data; int r; r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr)); if (r == 1) { if (fDiscover) { char externalIPAddress[40]; r = UPNP_GetExternalIPAddress( urls.controlURL, data.first.servicetype, externalIPAddress); if (r != UPNPCOMMAND_SUCCESS) { LogPrintf("UPnP: GetExternalIPAddress() returned %d\n", r); } else { if (externalIPAddress[0]) { CNetAddr resolved; if (LookupHost(externalIPAddress, resolved, false)) { LogPrintf("UPnP: ExternalIPAddress = %s\n", resolved.ToString().c_str()); AddLocal(resolved, LOCAL_UPNP); } } else { LogPrintf("UPnP: GetExternalIPAddress failed.\n"); } } } std::string strDesc = "Bitcoin " + FormatFullVersion(); try { while (true) { #ifndef UPNPDISCOVER_SUCCESS /* miniupnpc 1.5 */ r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype, port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0); #else /* miniupnpc 1.6 */ r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype, port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0"); #endif if (r != UPNPCOMMAND_SUCCESS) { LogPrintf( "AddPortMapping(%s, %s, %s) failed with code %d (%s)\n", port, port, lanaddr, r, strupnperror(r)); } else { LogPrintf("UPnP Port Mapping successful.\n"); } // Refresh every 20 minutes MilliSleep(20 * 60 * 1000); } } catch (const boost::thread_interrupted &) { r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype, port.c_str(), "TCP", 0); LogPrintf("UPNP_DeletePortMapping() returned: %d\n", r); freeUPNPDevlist(devlist); devlist = 0; FreeUPNPUrls(&urls); throw; } } else { LogPrintf("No valid UPnP IGDs found\n"); freeUPNPDevlist(devlist); devlist = 0; if (r != 0) { FreeUPNPUrls(&urls); } } } void MapPort(bool fUseUPnP) { static boost::thread *upnp_thread = nullptr; if (fUseUPnP) { if (upnp_thread) { upnp_thread->interrupt(); upnp_thread->join(); delete upnp_thread; } upnp_thread = new boost::thread( boost::bind(&TraceThread, "upnp", &ThreadMapPort)); } else if (upnp_thread) { upnp_thread->interrupt(); upnp_thread->join(); delete upnp_thread; upnp_thread = nullptr; } } #else void MapPort(bool) { // 
Intentionally left blank. } #endif static std::string GetDNSHost(const CDNSSeedData &data, ServiceFlags *requiredServiceBits) { // use default host for non-filter-capable seeds or if we use the default // service bits (NODE_NETWORK) if (!data.supportsServiceBitsFiltering || *requiredServiceBits == NODE_NETWORK) { *requiredServiceBits = NODE_NETWORK; return data.host; } // See chainparams.cpp, most dnsseeds only support one or two possible // servicebits hostnames return strprintf("x%x.%s", *requiredServiceBits, data.host); } void CConnman::ThreadDNSAddressSeed() { // goal: only query DNS seeds if address need is acute. // Avoiding DNS seeds when we don't need them improves user privacy by // creating fewer identifying DNS requests, reduces trust by giving seeds // less influence on the network topology, and reduces traffic to the seeds. if ((addrman.size() > 0) && (!gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED))) { if (!interruptNet.sleep_for(std::chrono::seconds(11))) { return; } LOCK(cs_vNodes); int nRelevant = 0; for (auto pnode : vNodes) { nRelevant += pnode->fSuccessfullyConnected && ((pnode->nServices & nRelevantServices) == nRelevantServices); } if (nRelevant >= 2) { LogPrintf("P2P peers available. Skipped DNS seeding.\n"); return; } } - const std::vector &vSeeds = Params().DNSSeeds(); + const std::vector &vSeeds = + config->GetChainParams().DNSSeeds(); int found = 0; LogPrintf("Loading addresses from DNS seeds (could take a while)\n"); for (const CDNSSeedData &seed : vSeeds) { if (HaveNameProxy()) { AddOneShot(seed.host); } else { std::vector vIPs; std::vector vAdd; ServiceFlags requiredServiceBits = nRelevantServices; if (LookupHost(GetDNSHost(seed, &requiredServiceBits).c_str(), vIPs, 0, true)) { for (const CNetAddr &ip : vIPs) { int nOneDay = 24 * 3600; - CAddress addr = - CAddress(CService(ip, Params().GetDefaultPort()), - requiredServiceBits); + CAddress addr = CAddress( + CService(ip, config->GetChainParams().GetDefaultPort()), + requiredServiceBits); // Use a random age between 3 and 7 days old. addr.nTime = GetTime() - 3 * nOneDay - GetRand(4 * nOneDay); vAdd.push_back(addr); found++; } } // TODO: The seed name resolve may fail, yielding an IP of [::], // which results in addrman assigning the same source to results // from different seeds. This should switch to a hard-coded stable // dummy IP for each seed name, so that the resolve is not required // at all. 
if (!vIPs.empty()) { CService seedSource; Lookup(seed.name.c_str(), seedSource, 0, true); addrman.Add(vAdd, seedSource); } } } LogPrintf("%d addresses found from DNS seeds\n", found); } void CConnman::DumpAddresses() { int64_t nStart = GetTimeMillis(); CAddrDB adb(config->GetChainParams()); adb.Write(addrman); LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n", addrman.size(), GetTimeMillis() - nStart); } void CConnman::DumpData() { DumpAddresses(); DumpBanlist(); } void CConnman::ProcessOneShot() { std::string strDest; { LOCK(cs_vOneShots); if (vOneShots.empty()) { return; } strDest = vOneShots.front(); vOneShots.pop_front(); } CAddress addr; CSemaphoreGrant grant(*semOutbound, true); if (grant) { if (!OpenNetworkConnection(addr, false, &grant, strDest.c_str(), true)) { AddOneShot(strDest); } } } void CConnman::ThreadOpenConnections() { // Connect to specific addresses if (gArgs.IsArgSet("-connect") && gArgs.GetArgs("-connect").size() > 0) { for (int64_t nLoop = 0;; nLoop++) { ProcessOneShot(); for (const std::string &strAddr : gArgs.GetArgs("-connect")) { CAddress addr(CService(), NODE_NONE); OpenNetworkConnection(addr, false, nullptr, strAddr.c_str()); for (int i = 0; i < 10 && i < nLoop; i++) { if (!interruptNet.sleep_for( std::chrono::milliseconds(500))) { return; } } } if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } } } // Initiate network connections int64_t nStart = GetTime(); // Minimum time before next feeler connection (in microseconds). int64_t nNextFeeler = PoissonNextSend(nStart * 1000 * 1000, FEELER_INTERVAL); while (!interruptNet) { ProcessOneShot(); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } CSemaphoreGrant grant(*semOutbound); if (interruptNet) { return; } // Add seed nodes if DNS seeds are all down (an infrastructure attack?). if (addrman.size() == 0 && (GetTime() - nStart > 60)) { static bool done = false; if (!done) { LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be " "available.\n"); CNetAddr local; LookupHost("127.0.0.1", local, false); - addrman.Add(convertSeed6(Params().FixedSeeds()), local); + addrman.Add(convertSeed6(config->GetChainParams().FixedSeeds()), + local); done = true; } } // // Choose an address to connect to based on most recently seen // CAddress addrConnect; // Only connect out to one peer per network group (/16 for IPv4). Do // this here so we don't have to critsect vNodes inside mapAddresses // critsect. int nOutbound = 0; std::set> setConnected; { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (!pnode->fInbound && !pnode->fAddnode) { // Netgroups for inbound and addnode peers are not excluded // because our goal here is to not use multiple of our // limited outbound slots on a single netgroup but inbound // and addnode peers do not use our outbound slots. Inbound // peers also have the added issue that they're attacker // controlled and could be used to prevent us from // connecting to particular hosts if we used them here. setConnected.insert(pnode->addr.GetGroup()); nOutbound++; } } } // Feeler Connections // // Design goals: // * Increase the number of connectable addresses in the tried table. // // Method: // * Choose a random address from new and attempt to connect to it if // we can connect successfully it is added to tried. // * Start attempting feeler connections only after node finishes // making outbound connections. // * Only make a feeler connection once every few minutes. 
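// (The pacing comes from PoissonNextSend() below: the next feeler attempt is
//  drawn from an exponential distribution around FEELER_INTERVAL, so
//  individual attempts are unpredictable while still averaging one per
//  interval.)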
// bool fFeeler = false; if (nOutbound >= nMaxOutbound) { // The current time right now (in microseconds). int64_t nTime = GetTimeMicros(); if (nTime > nNextFeeler) { nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL); fFeeler = true; } else { continue; } } int64_t nANow = GetAdjustedTime(); int nTries = 0; while (!interruptNet) { CAddrInfo addr = addrman.Select(fFeeler); // if we selected an invalid address, restart if (!addr.IsValid() || setConnected.count(addr.GetGroup()) || IsLocal(addr)) { break; } // If we didn't find an appropriate destination after trying 100 // addresses fetched from addrman, stop this loop, and let the outer // loop run again (which sleeps, adds seed nodes, recalculates // already-connected network ranges, ...) before trying new addrman // addresses. nTries++; if (nTries > 100) { break; } if (IsLimited(addr)) { continue; } // only connect to full nodes if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES) { continue; } // only consider very recently tried nodes after 30 failed attempts if (nANow - addr.nLastTry < 600 && nTries < 30) { continue; } // only consider nodes missing relevant services after 40 failed // attempts and only if less than half the outbound are up. if ((addr.nServices & nRelevantServices) != nRelevantServices && (nTries < 40 || nOutbound >= (nMaxOutbound >> 1))) { continue; } // do not allow non-default ports, unless after 50 invalid addresses // selected already. - if (addr.GetPort() != Params().GetDefaultPort() && nTries < 50) { + if (addr.GetPort() != config->GetChainParams().GetDefaultPort() && + nTries < 50) { continue; } addrConnect = addr; break; } if (addrConnect.IsValid()) { if (fFeeler) { // Add small amount of random noise before connection to avoid // synchronization. int randsleep = GetRandInt(FEELER_SLEEP_WINDOW * 1000); if (!interruptNet.sleep_for( std::chrono::milliseconds(randsleep))) { return; } LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString()); } OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, false, fFeeler); } } } std::vector CConnman::GetAddedNodeInfo() { std::vector ret; std::list lAddresses(0); { LOCK(cs_vAddedNodes); ret.reserve(vAddedNodes.size()); for (const std::string &strAddNode : vAddedNodes) { lAddresses.push_back(strAddNode); } } // Build a map of all already connected addresses (by IP:port and by name) // to inbound/outbound and resolved CService std::map mapConnected; std::map> mapConnectedByName; { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (pnode->addr.IsValid()) { mapConnected[pnode->addr] = pnode->fInbound; } std::string addrName = pnode->GetAddrName(); if (!addrName.empty()) { mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->fInbound, static_cast(pnode->addr)); } } } for (const std::string &strAddNode : lAddresses) { - CService service( - LookupNumeric(strAddNode.c_str(), Params().GetDefaultPort())); + CService service(LookupNumeric( + strAddNode.c_str(), config->GetChainParams().GetDefaultPort())); if (service.IsValid()) { // strAddNode is an IP:port auto it = mapConnected.find(service); if (it != mapConnected.end()) { ret.push_back( AddedNodeInfo{strAddNode, service, true, it->second}); } else { ret.push_back( AddedNodeInfo{strAddNode, CService(), false, false}); } } else { // strAddNode is a name auto it = mapConnectedByName.find(strAddNode); if (it != mapConnectedByName.end()) { ret.push_back(AddedNodeInfo{strAddNode, it->second.second, true, it->second.first}); } 
else { ret.push_back( AddedNodeInfo{strAddNode, CService(), false, false}); } } } return ret; } void CConnman::ThreadOpenAddedConnections() { { LOCK(cs_vAddedNodes); if (gArgs.IsArgSet("-addnode")) { vAddedNodes = gArgs.GetArgs("-addnode"); } } while (true) { CSemaphoreGrant grant(*semAddnode); std::vector vInfo = GetAddedNodeInfo(); bool tried = false; for (const AddedNodeInfo &info : vInfo) { if (!info.fConnected) { if (!grant.TryAcquire()) { // If we've used up our semaphore and need a new one, lets // not wait here since while we are waiting the // addednodeinfo state might change. break; } // If strAddedNode is an IP/port, decode it immediately, so // OpenNetworkConnection can detect existing connections to that // IP/port. tried = true; - CService service(LookupNumeric(info.strAddedNode.c_str(), - Params().GetDefaultPort())); + CService service( + LookupNumeric(info.strAddedNode.c_str(), + config->GetChainParams().GetDefaultPort())); OpenNetworkConnection(CAddress(service, NODE_NONE), false, &grant, info.strAddedNode.c_str(), false, false, true); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } } } // Retry every 60 seconds if a connection was attempted, otherwise two // seconds. if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2))) { return; } } } // If successful, this moves the passed grant to the constructed node. bool CConnman::OpenNetworkConnection(const CAddress &addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, bool fOneShot, bool fFeeler, bool fAddnode) { // // Initiate outbound network connection // if (interruptNet) { return false; } if (!fNetworkActive) { return false; } if (!pszDest) { if (IsLocal(addrConnect) || FindNode((CNetAddr)addrConnect) || IsBanned(addrConnect) || FindNode(addrConnect.ToStringIPPort())) { return false; } } else if (FindNode(std::string(pszDest))) { return false; } CNode *pnode = ConnectNode(addrConnect, pszDest, fCountFailure); if (!pnode) { return false; } if (grantOutbound) { grantOutbound->MoveTo(pnode->grantOutbound); } if (fOneShot) { pnode->fOneShot = true; } if (fFeeler) { pnode->fFeeler = true; } if (fAddnode) { pnode->fAddnode = true; } GetNodeSignals().InitializeNode(*config, pnode, *this); { LOCK(cs_vNodes); vNodes.push_back(pnode); } return true; } void CConnman::ThreadMessageHandler() { while (!flagInterruptMsgProc) { std::vector vNodesCopy; { LOCK(cs_vNodes); vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { pnode->AddRef(); } } bool fMoreWork = false; for (CNode *pnode : vNodesCopy) { if (pnode->fDisconnect) { continue; } // Receive messages bool fMoreNodeWork = GetNodeSignals().ProcessMessages( *config, pnode, *this, flagInterruptMsgProc); fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend); if (flagInterruptMsgProc) { return; } // Send messages { LOCK(pnode->cs_sendProcessing); GetNodeSignals().SendMessages(*config, pnode, *this, flagInterruptMsgProc); } if (flagInterruptMsgProc) { return; } } { LOCK(cs_vNodes); for (CNode *pnode : vNodesCopy) { pnode->Release(); } } std::unique_lock lock(mutexMsgProc); if (!fMoreWork) { condMsgProc.wait_until(lock, std::chrono::steady_clock::now() + std::chrono::milliseconds(100), [this] { return fMsgProcWake; }); } fMsgProcWake = false; } } bool CConnman::BindListenPort(const CService &addrBind, std::string &strError, bool fWhitelisted) { strError = ""; int nOne = 1; // Create socket for listening for incoming connections struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); if 
(!addrBind.GetSockAddr((struct sockaddr *)&sockaddr, &len)) { strError = strprintf("Error: Bind address family for %s not supported", addrBind.ToString()); LogPrintf("%s\n", strError); return false; } SOCKET hListenSocket = socket(((struct sockaddr *)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP); if (hListenSocket == INVALID_SOCKET) { strError = strprintf("Error: Couldn't open socket for incoming " "connections (socket returned error %s)", NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError); return false; } if (!IsSelectableSocket(hListenSocket)) { strError = "Error: Couldn't create a listenable socket for incoming " "connections"; LogPrintf("%s\n", strError); return false; } #ifndef WIN32 #ifdef SO_NOSIGPIPE // Different way of disabling SIGPIPE on BSD setsockopt(hListenSocket, SOL_SOCKET, SO_NOSIGPIPE, (void *)&nOne, sizeof(int)); #endif // Allow binding if the port is still in TIME_WAIT state after // the program was closed and restarted. setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (void *)&nOne, sizeof(int)); // Disable Nagle's algorithm setsockopt(hListenSocket, IPPROTO_TCP, TCP_NODELAY, (void *)&nOne, sizeof(int)); #else setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (const char *)&nOne, sizeof(int)); setsockopt(hListenSocket, IPPROTO_TCP, TCP_NODELAY, (const char *)&nOne, sizeof(int)); #endif // Set to non-blocking, incoming connections will also inherit this if (!SetSocketNonBlocking(hListenSocket, true)) { strError = strprintf("BindListenPort: Setting listening socket to " "non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError); return false; } // Some systems don't have IPV6_V6ONLY but are always v6only; others do have // the option and enable it by default or not. Try to enable it, if // possible. if (addrBind.IsIPv6()) { #ifdef IPV6_V6ONLY #ifdef WIN32 setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (const char *)&nOne, sizeof(int)); #else setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (void *)&nOne, sizeof(int)); #endif #endif #ifdef WIN32 int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED; setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char *)&nProtLevel, sizeof(int)); #endif } if (::bind(hListenSocket, (struct sockaddr *)&sockaddr, len) == SOCKET_ERROR) { int nErr = WSAGetLastError(); if (nErr == WSAEADDRINUSE) { strError = strprintf(_("Unable to bind to %s on this computer. 
%s " "is probably already running."), addrBind.ToString(), _(PACKAGE_NAME)); } else { strError = strprintf(_("Unable to bind to %s on this computer " "(bind returned error %s)"), addrBind.ToString(), NetworkErrorString(nErr)); } LogPrintf("%s\n", strError); CloseSocket(hListenSocket); return false; } LogPrintf("Bound to %s\n", addrBind.ToString()); // Listen for incoming connections if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) { strError = strprintf(_("Error: Listening for incoming connections " "failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError); CloseSocket(hListenSocket); return false; } vhListenSocket.push_back(ListenSocket(hListenSocket, fWhitelisted)); if (addrBind.IsRoutable() && fDiscover && !fWhitelisted) { AddLocal(addrBind, LOCAL_BIND); } return true; } void Discover(boost::thread_group &threadGroup) { if (!fDiscover) { return; } #ifdef WIN32 // Get local host IP char pszHostName[256] = ""; if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) { std::vector vaddr; if (LookupHost(pszHostName, vaddr, 0, true)) { for (const CNetAddr &addr : vaddr) { if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToString()); } } } } #else // Get local host ip struct ifaddrs *myaddrs; if (getifaddrs(&myaddrs) == 0) { for (struct ifaddrs *ifa = myaddrs; ifa != nullptr; ifa = ifa->ifa_next) { if (ifa->ifa_addr == nullptr || (ifa->ifa_flags & IFF_UP) == 0 || strcmp(ifa->ifa_name, "lo") == 0 || strcmp(ifa->ifa_name, "lo0") == 0) { continue; } if (ifa->ifa_addr->sa_family == AF_INET) { struct sockaddr_in *s4 = (struct sockaddr_in *)(ifa->ifa_addr); CNetAddr addr(s4->sin_addr); if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToString()); } } else if (ifa->ifa_addr->sa_family == AF_INET6) { struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)(ifa->ifa_addr); CNetAddr addr(s6->sin6_addr); if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToString()); } } } freeifaddrs(myaddrs); } #endif } void CConnman::SetNetworkActive(bool active) { LogPrint(BCLog::NET, "SetNetworkActive: %s\n", active); if (!active) { fNetworkActive = false; LOCK(cs_vNodes); // Close sockets to all nodes for (CNode *pnode : vNodes) { pnode->CloseSocketDisconnect(); } } else { fNetworkActive = true; } uiInterface.NotifyNetworkActiveChanged(fNetworkActive); } CConnman::CConnman(const Config &configIn, uint64_t nSeed0In, uint64_t nSeed1In) : config(&configIn), nSeed0(nSeed0In), nSeed1(nSeed1In) { fNetworkActive = true; setBannedIsDirty = false; fAddressesInitialized = false; nLastNodeId = 0; nSendBufferMaxSize = 0; nReceiveFloodSize = 0; semOutbound = nullptr; semAddnode = nullptr; nMaxConnections = 0; nMaxOutbound = 0; nMaxAddnode = 0; nBestHeight = 0; clientInterface = nullptr; flagInterruptMsgProc = false; } NodeId CConnman::GetNewNodeId() { return nLastNodeId.fetch_add(1, std::memory_order_relaxed); } bool CConnman::Start(CScheduler &scheduler, std::string &strNodeError, Options connOptions) { nTotalBytesRecv = 0; nTotalBytesSent = 0; nMaxOutboundTotalBytesSentInCycle = 0; nMaxOutboundCycleStartTime = 0; nRelevantServices = connOptions.nRelevantServices; nLocalServices = connOptions.nLocalServices; nMaxConnections = connOptions.nMaxConnections; nMaxOutbound = std::min((connOptions.nMaxOutbound), nMaxConnections); nMaxAddnode = connOptions.nMaxAddnode; nMaxFeeler = connOptions.nMaxFeeler; nSendBufferMaxSize = connOptions.nSendBufferMaxSize; 
nReceiveFloodSize = connOptions.nReceiveFloodSize; nMaxOutboundLimit = connOptions.nMaxOutboundLimit; nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe; SetBestHeight(connOptions.nBestHeight); clientInterface = connOptions.uiInterface; if (clientInterface) { clientInterface->InitMessage(_("Loading addresses...")); } // Load addresses from peers.dat int64_t nStart = GetTimeMillis(); { CAddrDB adb(config->GetChainParams()); if (adb.Read(addrman)) { LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman.size(), GetTimeMillis() - nStart); } else { // Addrman can be in an inconsistent state after failure, reset it addrman.Clear(); LogPrintf("Invalid or missing peers.dat; recreating\n"); DumpAddresses(); } } if (clientInterface) { clientInterface->InitMessage(_("Loading banlist...")); } // Load addresses from banlist.dat nStart = GetTimeMillis(); CBanDB bandb(config->GetChainParams()); banmap_t banmap; if (bandb.Read(banmap)) { // thread save setter SetBanned(banmap); // no need to write down, just read data SetBannedSetDirty(false); // sweep out unused entries SweepBanned(); LogPrint(BCLog::NET, "Loaded %d banned node ips/subnets from banlist.dat %dms\n", banmap.size(), GetTimeMillis() - nStart); } else { LogPrintf("Invalid or missing banlist.dat; recreating\n"); // force write SetBannedSetDirty(true); DumpBanlist(); } uiInterface.InitMessage(_("Starting network threads...")); fAddressesInitialized = true; if (semOutbound == nullptr) { // initialize semaphore semOutbound = new CSemaphore( std::min((nMaxOutbound + nMaxFeeler), nMaxConnections)); } if (semAddnode == nullptr) { // initialize semaphore semAddnode = new CSemaphore(nMaxAddnode); } // // Start threads // InterruptSocks5(false); interruptNet.reset(); flagInterruptMsgProc = false; { std::unique_lock lock(mutexMsgProc); fMsgProcWake = false; } // Send and receive from sockets, accept connections threadSocketHandler = std::thread( &TraceThread>, "net", std::function(std::bind(&CConnman::ThreadSocketHandler, this))); if (!gArgs.GetBoolArg("-dnsseed", true)) { LogPrintf("DNS seeding disabled\n"); } else { threadDNSAddressSeed = std::thread(&TraceThread>, "dnsseed", std::function( std::bind(&CConnman::ThreadDNSAddressSeed, this))); } // Initiate outbound connections from -addnode threadOpenAddedConnections = std::thread(&TraceThread>, "addcon", std::function(std::bind( &CConnman::ThreadOpenAddedConnections, this))); // Initiate outbound connections unless connect=0 if (!gArgs.IsArgSet("-connect") || gArgs.GetArgs("-connect").size() != 1 || gArgs.GetArgs("-connect")[0] != "0") { threadOpenConnections = std::thread(&TraceThread>, "opencon", std::function( std::bind(&CConnman::ThreadOpenConnections, this))); } // Process messages threadMessageHandler = std::thread(&TraceThread>, "msghand", std::function( std::bind(&CConnman::ThreadMessageHandler, this))); // Dump network addresses scheduler.scheduleEvery(std::bind(&CConnman::DumpData, this), DUMP_ADDRESSES_INTERVAL * 1000); return true; } class CNetCleanup { public: CNetCleanup() {} ~CNetCleanup() { #ifdef WIN32 // Shutdown Windows Sockets WSACleanup(); #endif } } instance_of_cnetcleanup; void CConnman::Interrupt() { { std::lock_guard lock(mutexMsgProc); flagInterruptMsgProc = true; } condMsgProc.notify_all(); interruptNet(); InterruptSocks5(true); if (semOutbound) { for (int i = 0; i < (nMaxOutbound + nMaxFeeler); i++) { semOutbound->post(); } } if (semAddnode) { for (int i = 0; i < nMaxAddnode; i++) { semAddnode->post(); } } } void CConnman::Stop() { if 
(threadMessageHandler.joinable()) { threadMessageHandler.join(); } if (threadOpenConnections.joinable()) { threadOpenConnections.join(); } if (threadOpenAddedConnections.joinable()) { threadOpenAddedConnections.join(); } if (threadDNSAddressSeed.joinable()) { threadDNSAddressSeed.join(); } if (threadSocketHandler.joinable()) { threadSocketHandler.join(); } if (fAddressesInitialized) { DumpData(); fAddressesInitialized = false; } // Close sockets for (CNode *pnode : vNodes) { pnode->CloseSocketDisconnect(); } for (ListenSocket &hListenSocket : vhListenSocket) { if (hListenSocket.socket != INVALID_SOCKET) { if (!CloseSocket(hListenSocket.socket)) { LogPrintf("CloseSocket(hListenSocket) failed with error %s\n", NetworkErrorString(WSAGetLastError())); } } } // clean up some globals (to help leak detection) for (CNode *pnode : vNodes) { DeleteNode(pnode); } for (CNode *pnode : vNodesDisconnected) { DeleteNode(pnode); } vNodes.clear(); vNodesDisconnected.clear(); vhListenSocket.clear(); delete semOutbound; semOutbound = nullptr; delete semAddnode; semAddnode = nullptr; } void CConnman::DeleteNode(CNode *pnode) { assert(pnode); bool fUpdateConnectionTime = false; GetNodeSignals().FinalizeNode(pnode->GetId(), fUpdateConnectionTime); if (fUpdateConnectionTime) { addrman.Connected(pnode->addr); } delete pnode; } CConnman::~CConnman() { Interrupt(); Stop(); } size_t CConnman::GetAddressCount() const { return addrman.size(); } void CConnman::SetServices(const CService &addr, ServiceFlags nServices) { addrman.SetServices(addr, nServices); } void CConnman::MarkAddressGood(const CAddress &addr) { addrman.Good(addr); } void CConnman::AddNewAddress(const CAddress &addr, const CAddress &addrFrom, int64_t nTimePenalty) { addrman.Add(addr, addrFrom, nTimePenalty); } void CConnman::AddNewAddresses(const std::vector &vAddr, const CAddress &addrFrom, int64_t nTimePenalty) { addrman.Add(vAddr, addrFrom, nTimePenalty); } std::vector CConnman::GetAddresses() { return addrman.GetAddr(); } bool CConnman::AddNode(const std::string &strNode) { LOCK(cs_vAddedNodes); for (std::vector::const_iterator it = vAddedNodes.begin(); it != vAddedNodes.end(); ++it) { if (strNode == *it) { return false; } } vAddedNodes.push_back(strNode); return true; } bool CConnman::RemoveAddedNode(const std::string &strNode) { LOCK(cs_vAddedNodes); for (std::vector::iterator it = vAddedNodes.begin(); it != vAddedNodes.end(); ++it) { if (strNode == *it) { vAddedNodes.erase(it); return true; } } return false; } size_t CConnman::GetNodeCount(NumConnections flags) { LOCK(cs_vNodes); // Shortcut if we want total if (flags == CConnman::CONNECTIONS_ALL) { return vNodes.size(); } int nNum = 0; for (std::vector::const_iterator it = vNodes.begin(); it != vNodes.end(); ++it) { if (flags & ((*it)->fInbound ? 
CONNECTIONS_IN : CONNECTIONS_OUT)) { nNum++; } } return nNum; } void CConnman::GetNodeStats(std::vector &vstats) { vstats.clear(); LOCK(cs_vNodes); vstats.reserve(vNodes.size()); for (CNode *pnode : vNodes) { vstats.emplace_back(); pnode->copyStats(vstats.back()); } } bool CConnman::DisconnectNode(const std::string &strNode) { LOCK(cs_vNodes); if (CNode *pnode = FindNode(strNode)) { pnode->fDisconnect = true; return true; } return false; } bool CConnman::DisconnectNode(NodeId id) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (id == pnode->id) { pnode->fDisconnect = true; return true; } } return false; } void CConnman::RecordBytesRecv(uint64_t bytes) { LOCK(cs_totalBytesRecv); nTotalBytesRecv += bytes; } void CConnman::RecordBytesSent(uint64_t bytes) { LOCK(cs_totalBytesSent); nTotalBytesSent += bytes; uint64_t now = GetTime(); if (nMaxOutboundCycleStartTime + nMaxOutboundTimeframe < now) { // timeframe expired, reset cycle nMaxOutboundCycleStartTime = now; nMaxOutboundTotalBytesSentInCycle = 0; } // TODO, exclude whitebind peers nMaxOutboundTotalBytesSentInCycle += bytes; } void CConnman::SetMaxOutboundTarget(uint64_t limit) { LOCK(cs_totalBytesSent); nMaxOutboundLimit = limit; } uint64_t CConnman::GetMaxOutboundTarget() { LOCK(cs_totalBytesSent); return nMaxOutboundLimit; } uint64_t CConnman::GetMaxOutboundTimeframe() { LOCK(cs_totalBytesSent); return nMaxOutboundTimeframe; } uint64_t CConnman::GetMaxOutboundTimeLeftInCycle() { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return 0; } if (nMaxOutboundCycleStartTime == 0) { return nMaxOutboundTimeframe; } uint64_t cycleEndTime = nMaxOutboundCycleStartTime + nMaxOutboundTimeframe; uint64_t now = GetTime(); return (cycleEndTime < now) ? 0 : cycleEndTime - GetTime(); } void CConnman::SetMaxOutboundTimeframe(uint64_t timeframe) { LOCK(cs_totalBytesSent); if (nMaxOutboundTimeframe != timeframe) { // reset measure-cycle in case of changing the timeframe. nMaxOutboundCycleStartTime = GetTime(); } nMaxOutboundTimeframe = timeframe; } bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return false; } if (historicalBlockServingLimit) { // keep a large enough buffer to at least relay each block once. uint64_t timeLeftInCycle = GetMaxOutboundTimeLeftInCycle(); uint64_t buffer = timeLeftInCycle / 600 * ONE_MEGABYTE; if (buffer >= nMaxOutboundLimit || nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer) { return true; } } else if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) { return true; } return false; } uint64_t CConnman::GetOutboundTargetBytesLeft() { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return 0; } return (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) ? 
0 : nMaxOutboundLimit - nMaxOutboundTotalBytesSentInCycle; } uint64_t CConnman::GetTotalBytesRecv() { LOCK(cs_totalBytesRecv); return nTotalBytesRecv; } uint64_t CConnman::GetTotalBytesSent() { LOCK(cs_totalBytesSent); return nTotalBytesSent; } ServiceFlags CConnman::GetLocalServices() const { return nLocalServices; } void CConnman::SetBestHeight(int height) { nBestHeight.store(height, std::memory_order_release); } int CConnman::GetBestHeight() const { return nBestHeight.load(std::memory_order_acquire); } unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; } unsigned int CConnman::GetSendBufferSize() const { return nSendBufferMaxSize; } CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const std::string &addrNameIn, bool fInboundIn) : nTimeConnected(GetSystemTimeInSeconds()), addr(addrIn), fInbound(fInboundIn), id(idIn), nKeyedNetGroup(nKeyedNetGroupIn), addrKnown(5000, 0.001), filterInventoryKnown(50000, 0.000001), nLocalHostNonce(nLocalHostNonceIn), nLocalServices(nLocalServicesIn), nMyStartingHeight(nMyStartingHeightIn), nSendVersion(0) { nServices = NODE_NONE; nServicesExpected = NODE_NONE; hSocket = hSocketIn; nRecvVersion = INIT_PROTO_VERSION; nLastSend = 0; nLastRecv = 0; nSendBytes = 0; nRecvBytes = 0; nTimeOffset = 0; addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn; nVersion = 0; strSubVer = ""; fWhitelisted = false; fOneShot = false; fAddnode = false; // set by version message fClient = false; fFeeler = false; fSuccessfullyConnected = false; fDisconnect = false; nRefCount = 0; nSendSize = 0; nSendOffset = 0; hashContinue = uint256(); nStartingHeight = -1; filterInventoryKnown.reset(); fSendMempool = false; fGetAddr = false; nNextLocalAddrSend = 0; nNextAddrSend = 0; nNextInvSend = 0; fRelayTxes = false; fSentAddr = false; pfilter = new CBloomFilter(); timeLastMempoolReq = 0; nLastBlockTime = 0; nLastTXTime = 0; nPingNonceSent = 0; nPingUsecStart = 0; nPingUsecTime = 0; fPingQueued = false; nMinPingUsecTime = std::numeric_limits::max(); minFeeFilter = Amount(0); lastSentFeeFilter = Amount(0); nextSendTimeFeeFilter = 0; fPauseRecv = false; fPauseSend = false; nProcessQueueSize = 0; for (const std::string &msg : getAllNetMessageTypes()) { mapRecvBytesPerMsgCmd[msg] = 0; } mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0; if (fLogIPs) { LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", addrName, id); } else { LogPrint(BCLog::NET, "Added connection peer=%d\n", id); } } CNode::~CNode() { CloseSocket(hSocket); if (pfilter) { delete pfilter; } } void CNode::AskFor(const CInv &inv) { if (mapAskFor.size() > MAPASKFOR_MAX_SZ || setAskFor.size() > SETASKFOR_MAX_SZ) { return; } // a peer may not have multiple non-responded queue positions for a single // inv item. if (!setAskFor.insert(inv.hash).second) { return; } // We're using mapAskFor as a priority queue, the key is the earliest time // the request can be sent. 
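Before the implementation that follows, the timing rule in plain terms: the first getdata for an inv may go out essentially right away (the stored request time starts at 0), and every re-request is scheduled at least two minutes after the previous one, with "now" also nudged so the map keys stay unique and ordered. A small worked sketch of just that rule, not part of the patch (timestamps are faked; the real code uses GetTimeMicros() and mapAlreadyAskedFor):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Next allowed request time for one inv, in microseconds, following the
// same rule as CNode::AskFor(): at least two minutes after the previous
// request, and never earlier than "now".
int64_t NextRequestTime(int64_t nowUsec, int64_t prevRequestUsec) {
    const int64_t kRetryDelayUsec = 2 * 60 * 1000000LL; // 2 minutes
    return std::max(prevRequestUsec + kRetryDelayUsec, nowUsec);
}

int main() {
    int64_t now = 1000000000LL;                    // fake "now"
    int64_t first = NextRequestTime(now, 0);       // never asked before
    int64_t retry = NextRequestTime(now, first);   // asked once already
    std::printf("first request at %lld, retry no earlier than %lld\n",
                (long long)first, (long long)retry);
    return 0;
}
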
int64_t nRequestTime; limitedmap::const_iterator it = mapAlreadyAskedFor.find(inv.hash); if (it != mapAlreadyAskedFor.end()) { nRequestTime = it->second; } else { nRequestTime = 0; } LogPrint(BCLog::NET, "askfor %s %d (%s) peer=%d\n", inv.ToString(), nRequestTime, DateTimeStrFormat("%H:%M:%S", nRequestTime / 1000000), id); // Make sure not to reuse time indexes to keep things in the same order int64_t nNow = GetTimeMicros() - 1000000; static int64_t nLastTime; ++nLastTime; nNow = std::max(nNow, nLastTime); nLastTime = nNow; // Each retry is 2 minutes after the last nRequestTime = std::max(nRequestTime + 2 * 60 * 1000000, nNow); if (it != mapAlreadyAskedFor.end()) { mapAlreadyAskedFor.update(it, nRequestTime); } else { mapAlreadyAskedFor.insert(std::make_pair(inv.hash, nRequestTime)); } mapAskFor.insert(std::make_pair(nRequestTime, inv)); } bool CConnman::NodeFullyConnected(const CNode *pnode) { return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect; } void CConnman::PushMessage(CNode *pnode, CSerializedNetMsg &&msg) { size_t nMessageSize = msg.data.size(); size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE; LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command.c_str()), nMessageSize, pnode->id); std::vector serializedHeader; serializedHeader.reserve(CMessageHeader::HEADER_SIZE); uint256 hash = Hash(msg.data.data(), msg.data.data() + nMessageSize); - CMessageHeader hdr(Params().NetMagic(), msg.command.c_str(), nMessageSize); + CMessageHeader hdr(config->GetChainParams().NetMagic(), msg.command.c_str(), + nMessageSize); memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE); CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, serializedHeader, 0, hdr}; size_t nBytesSent = 0; { LOCK(pnode->cs_vSend); bool optimisticSend(pnode->vSendMsg.empty()); // log total amount of bytes per command pnode->mapSendBytesPerMsgCmd[msg.command] += nTotalSize; pnode->nSendSize += nTotalSize; if (pnode->nSendSize > nSendBufferMaxSize) { pnode->fPauseSend = true; } pnode->vSendMsg.push_back(std::move(serializedHeader)); if (nMessageSize) { pnode->vSendMsg.push_back(std::move(msg.data)); } // If write queue empty, attempt "optimistic write" if (optimisticSend == true) { nBytesSent = SocketSendData(pnode); } } if (nBytesSent) { RecordBytesSent(nBytesSent); } } bool CConnman::ForNode(NodeId id, std::function func) { CNode *found = nullptr; LOCK(cs_vNodes); for (auto &&pnode : vNodes) { if (pnode->id == id) { found = pnode; break; } } return found != nullptr && NodeFullyConnected(found) && func(found); } int64_t PoissonNextSend(int64_t nNow, int average_interval_seconds) { return nNow + int64_t(log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */) * average_interval_seconds * -1000000.0 + 0.5); } CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const { return CSipHasher(nSeed0, nSeed1).Write(id); } uint64_t CConnman::CalculateKeyedNetGroup(const CAddress &ad) const { std::vector vchNetGroup(ad.GetGroup()); return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP) .Write(&vchNetGroup[0], vchNetGroup.size()) .Finalize(); } /** * This function convert MaxBlockSize from byte to * MB with a decimal precision one digit rounded down * E.g. 
* 1660000 -> 1.6 * 2010000 -> 2.0 * 1000000 -> 1.0 * 230000 -> 0.2 * 50000 -> 0.0 * * NB behavior for EB<1MB not standardized yet still * the function applies the same algo used for * EB greater or equal to 1MB */ std::string getSubVersionEB(uint64_t MaxBlockSize) { // Prepare EB string we are going to add to SubVer: // 1) translate from byte to MB and convert to string // 2) limit the EB string to the first decimal digit (floored) std::stringstream ebMBs; ebMBs << (MaxBlockSize / (ONE_MEGABYTE / 10)); std::string eb = ebMBs.str(); eb.insert(eb.size() - 1, ".", 1); if (eb.substr(0, 1) == ".") { eb = "0" + eb; } return eb; } std::string userAgent(const Config &config) { // format excessive blocksize value std::string eb = getSubVersionEB(config.GetMaxBlockSize()); std::vector uacomments; uacomments.push_back("EB" + eb); // sanitize comments per BIP-0014, format user agent and check total size if (gArgs.IsArgSet("-uacomment")) { for (const std::string &cmt : gArgs.GetArgs("-uacomment")) { if (cmt != SanitizeString(cmt, SAFE_CHARS_UA_COMMENT)) { LogPrintf( "User Agent comment (%s) contains unsafe characters. " "We are going to use a sanitize version of the comment.\n", cmt); } uacomments.push_back(cmt); } } std::string subversion = FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments); if (subversion.size() > MAX_SUBVERSION_LENGTH) { LogPrintf("Total length of network version string (%i) exceeds maximum " "length (%i). Reduce the number or size of uacomments. " "String has been resized to the max length allowed.\n", subversion.size(), MAX_SUBVERSION_LENGTH); subversion.resize(MAX_SUBVERSION_LENGTH - 2); subversion.append(")/"); LogPrintf("Current network string has been set to: %s\n", subversion); } return subversion; } diff --git a/src/net_processing.cpp b/src/net_processing.cpp index c828990d7..2e3030e2f 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1,3838 +1,3839 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "net_processing.h" #include "addrman.h" #include "arith_uint256.h" #include "blockencodings.h" #include "chainparams.h" #include "config.h" #include "consensus/validation.h" #include "hash.h" #include "init.h" #include "merkleblock.h" #include "net.h" #include "netbase.h" #include "netmessagemaker.h" #include "policy/fees.h" #include "policy/policy.h" #include "primitives/block.h" #include "primitives/transaction.h" #include "random.h" #include "tinyformat.h" #include "txmempool.h" #include "ui_interface.h" #include "util.h" #include "utilmoneystr.h" #include "utilstrencodings.h" #include "validation.h" #include "validationinterface.h" #include #include #if defined(NDEBUG) #error "Bitcoin cannot be compiled without assertions." #endif // Used only to inform the wallet of when we last received a block. std::atomic nTimeBestReceived(0); struct IteratorComparator { template bool operator()(const I &a, const I &b) { return &(*a) < &(*b); } }; struct COrphanTx { // When modifying, adapt the copy of this definition in tests/DoS_tests. 
CTransactionRef tx; NodeId fromPeer; int64_t nTimeExpire; }; std::map mapOrphanTransactions GUARDED_BY(cs_main); std::map::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main); void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main); static size_t vExtraTxnForCompactIt = 0; static std::vector> vExtraTxnForCompact GUARDED_BY(cs_main); // SHA256("main address relay")[0:8] static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // Internal stuff namespace { /** Number of nodes with fSyncStarted. */ int nSyncStarted = 0; /** * Sources of received blocks, saved to be able to send them reject messages or * ban them when processing happens afterwards. Protected by cs_main. * Set mapBlockSource[hash].second to false if the node should not be punished * if the block is invalid. */ std::map> mapBlockSource; /** * Filter for transactions that were recently rejected by AcceptToMemoryPool. * These are not rerequested until the chain tip changes, at which point the * entire filter is reset. Protected by cs_main. * * Without this filter we'd be re-requesting txs from each of our peers, * increasing bandwidth consumption considerably. For instance, with 100 peers, * half of which relay a tx we don't accept, that might be a 50x bandwidth * increase. A flooding attacker attempting to roll-over the filter using * minimum-sized, 60byte, transactions might manage to send 1000/sec if we have * fast peers, so we pick 120,000 to give our peers a two minute window to send * invs to us. * * Decreasing the false positive rate is fairly cheap, so we pick one in a * million to make it highly unlikely for users to have issues with this filter. * * Memory used: 1.3 MB */ std::unique_ptr recentRejects; uint256 hashRecentRejectsChainTip; /** Blocks that are in flight, and that are in the queue to be downloaded. * Protected by cs_main. */ struct QueuedBlock { uint256 hash; //!< Optional. const CBlockIndex *pindex; //!< Whether this block has validated headers at the time of request. bool fValidatedHeaders; //!< Optional, used for CMPCTBLOCK downloads std::unique_ptr partialBlock; }; std::map::iterator>> mapBlocksInFlight; /** Stack of nodes which we have set to announce using compact blocks */ std::list lNodesAnnouncingHeaderAndIDs; /** Number of preferable block download peers. */ int nPreferredDownload = 0; /** Number of peers from which we're downloading blocks. */ int nPeersWithValidatedDownloads = 0; /** Relay map, protected by cs_main. */ typedef std::map MapRelay; MapRelay mapRelay; /** Expiration-time ordered list of (expire time, relay map entry) pairs, * protected by cs_main). */ std::deque> vRelayExpiration; } // namespace ////////////////////////////////////////////////////////////////////////////// // // Registration of network node signals. // namespace { struct CBlockReject { uint8_t chRejectCode; std::string strRejectReason; uint256 hashBlock; }; /** * Maintain validation-specific state about nodes, protected by cs_main, instead * by CNode's own locks. This simplifies asynchronous operation, where * processing of incoming data is done after the ProcessMessage call returns, * and we're no longer holding the node's locks. */ struct CNodeState { //! The peer's address const CService address; //! Whether we have a fully established connection. bool fCurrentlyConnected; //! Accumulated misbehaviour score for this peer. int nMisbehavior; //! Whether this peer should be disconnected and banned (unless //! whitelisted). bool fShouldBan; //! 
String name of this peer (debugging/logging purposes). const std::string name; //! List of asynchronously-determined block rejections to notify this peer //! about. std::vector rejects; //! The best known block we know this peer has announced. const CBlockIndex *pindexBestKnownBlock; //! The hash of the last unknown block this peer has announced. uint256 hashLastUnknownBlock; //! The last full block we both have. const CBlockIndex *pindexLastCommonBlock; //! The best header we have sent our peer. const CBlockIndex *pindexBestHeaderSent; //! Length of current-streak of unconnecting headers announcements int nUnconnectingHeaders; //! Whether we've started headers synchronization with this peer. bool fSyncStarted; //! Since when we're stalling block download progress (in microseconds), or //! 0. int64_t nStallingSince; std::list vBlocksInFlight; //! When the first entry in vBlocksInFlight started downloading. Don't care //! when vBlocksInFlight is empty. int64_t nDownloadingSince; int nBlocksInFlight; int nBlocksInFlightValidHeaders; //! Whether we consider this a preferred download peer. bool fPreferredDownload; //! Whether this peer wants invs or headers (when possible) for block //! announcements. bool fPreferHeaders; //! Whether this peer wants invs or cmpctblocks (when possible) for block //! announcements. bool fPreferHeaderAndIDs; /** * Whether this peer will send us cmpctblocks if we request them. * This is not used to gate request logic, as we really only care about * fSupportsDesiredCmpctVersion, but is used as a flag to "lock in" the * version of compact blocks we send. */ bool fProvidesHeaderAndIDs; /** * If we've announced NODE_WITNESS to this peer: whether the peer sends * witnesses in cmpctblocks/blocktxns, otherwise: whether this peer sends * non-witnesses in cmpctblocks/blocktxns. */ bool fSupportsDesiredCmpctVersion; CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) { fCurrentlyConnected = false; nMisbehavior = 0; fShouldBan = false; pindexBestKnownBlock = nullptr; hashLastUnknownBlock.SetNull(); pindexLastCommonBlock = nullptr; pindexBestHeaderSent = nullptr; nUnconnectingHeaders = 0; fSyncStarted = false; nStallingSince = 0; nDownloadingSince = 0; nBlocksInFlight = 0; nBlocksInFlightValidHeaders = 0; fPreferredDownload = false; fPreferHeaders = false; fPreferHeaderAndIDs = false; fProvidesHeaderAndIDs = false; fSupportsDesiredCmpctVersion = false; } }; /** Map maintaining per-node state. Requires cs_main. */ std::map mapNodeState; // Requires cs_main. CNodeState *State(NodeId pnode) { std::map::iterator it = mapNodeState.find(pnode); if (it == mapNodeState.end()) { return nullptr; } return &it->second; } void UpdatePreferredDownload(CNode *node, CNodeState *state) { nPreferredDownload -= state->fPreferredDownload; // Whether this node should be marked as a preferred download node. state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient; nPreferredDownload += state->fPreferredDownload; } void PushNodeVersion(const Config &config, CNode *pnode, CConnman &connman, int64_t nTime) { ServiceFlags nLocalNodeServices = pnode->GetLocalServices(); uint64_t nonce = pnode->GetLocalNonce(); int nNodeStartingHeight = pnode->GetMyStartingHeight(); NodeId nodeid = pnode->GetId(); CAddress addr = pnode->addr; CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? 
addr : CAddress(CService(), addr.nServices)); CAddress addrMe = CAddress(CService(), nLocalNodeServices); connman.PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe, nonce, userAgent(config), nNodeStartingHeight, ::fRelayTxes)); if (fLogIPs) { LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, " "us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid); } else { LogPrint( BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid); } } void InitializeNode(const Config &config, CNode *pnode, CConnman &connman) { CAddress addr = pnode->addr; std::string addrName = pnode->GetAddrName(); NodeId nodeid = pnode->GetId(); { LOCK(cs_main); mapNodeState.emplace_hint( mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName))); } if (!pnode->fInbound) { PushNodeVersion(config, pnode, connman, GetTime()); } } void FinalizeNode(NodeId nodeid, bool &fUpdateConnectionTime) { fUpdateConnectionTime = false; LOCK(cs_main); CNodeState *state = State(nodeid); if (state->fSyncStarted) { nSyncStarted--; } if (state->nMisbehavior == 0 && state->fCurrentlyConnected) { fUpdateConnectionTime = true; } for (const QueuedBlock &entry : state->vBlocksInFlight) { mapBlocksInFlight.erase(entry.hash); } // Get rid of stale mapBlockSource entries for this peer as they may leak // if we don't clean them up (I saw on the order of ~100 stale entries on // a full resynch in my testing -- these entries stay forever). // Performance note: most of the time mapBlockSource has 0 or 1 entries. // During synch of blockchain it may end up with as many as 1000 entries, // which still only takes ~1ms to iterate through on even old hardware. // So this memleak cleanup is not expensive and worth doing since even // small leaks are bad. :) for (auto it = mapBlockSource.begin(); it != mapBlockSource.end(); /*NA*/) { if (it->second.first == nodeid) { mapBlockSource.erase(it++); } else { ++it; } } EraseOrphansFor(nodeid); nPreferredDownload -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); mapNodeState.erase(nodeid); if (mapNodeState.empty()) { // Do a consistency check after the last peer is removed. assert(mapBlocksInFlight.empty()); assert(nPreferredDownload == 0); assert(nPeersWithValidatedDownloads == 0); } } // Requires cs_main. // Returns a bool indicating whether we requested this block. // Also used if a block was /not/ received and timed out or started with another // peer. bool MarkBlockAsReceived(const uint256 &hash) { std::map::iterator>>::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end()) { CNodeState *state = State(itInFlight->second.first); state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders; if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) { // Last validated block on the queue was received. 
nPeersWithValidatedDownloads--; } if (state->vBlocksInFlight.begin() == itInFlight->second.second) { // First block on the queue was received, update the start download // time for the next one state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros()); } state->vBlocksInFlight.erase(itInFlight->second.second); state->nBlocksInFlight--; state->nStallingSince = 0; mapBlocksInFlight.erase(itInFlight); return true; } return false; } // Requires cs_main. // returns false, still setting pit, if the block was already in flight from the // same peer pit will only be valid as long as the same cs_main lock is being // held. static bool MarkBlockAsInFlight(const Config &config, NodeId nodeid, const uint256 &hash, const Consensus::Params &consensusParams, const CBlockIndex *pindex = nullptr, std::list::iterator **pit = nullptr) { CNodeState *state = State(nodeid); assert(state != nullptr); // Short-circuit most stuff in case its from the same node. std::map::iterator>>::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) { *pit = &itInFlight->second.second; return false; } // Make sure it's not listed somewhere already. MarkBlockAsReceived(hash); std::list::iterator it = state->vBlocksInFlight.insert( state->vBlocksInFlight.end(), {hash, pindex, pindex != nullptr, std::unique_ptr( pit ? new PartiallyDownloadedBlock(config, &mempool) : nullptr)}); state->nBlocksInFlight++; state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; if (state->nBlocksInFlight == 1) { // We're starting a block download (batch) from this peer. state->nDownloadingSince = GetTimeMicros(); } if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) { nPeersWithValidatedDownloads++; } itInFlight = mapBlocksInFlight .insert(std::make_pair(hash, std::make_pair(nodeid, it))) .first; if (pit) { *pit = &itInFlight->second.second; } return true; } /** Check whether the last unknown block a peer advertised is not yet known. */ void ProcessBlockAvailability(NodeId nodeid) { CNodeState *state = State(nodeid); assert(state != nullptr); if (!state->hashLastUnknownBlock.IsNull()) { BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock); if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) { if (state->pindexBestKnownBlock == nullptr || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = itOld->second; } state->hashLastUnknownBlock.SetNull(); } } } /** Update tracking information about which blocks a peer is assumed to have. */ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) { CNodeState *state = State(nodeid); assert(state != nullptr); ProcessBlockAvailability(nodeid); BlockMap::iterator it = mapBlockIndex.find(hash); if (it != mapBlockIndex.end() && it->second->nChainWork > 0) { // An actually better block was announced. if (state->pindexBestKnownBlock == nullptr || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = it->second; } } else { // An unknown block was announced; just assume that the latest one is // the best one. 
state->hashLastUnknownBlock = hash; } } void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman &connman) { AssertLockHeld(cs_main); CNodeState *nodestate = State(nodeid); if (!nodestate) { LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid); return; } if (!nodestate->fProvidesHeaderAndIDs) { return; } for (std::list::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { if (*it == nodeid) { lNodesAnnouncingHeaderAndIDs.erase(it); lNodesAnnouncingHeaderAndIDs.push_back(nodeid); return; } } connman.ForNode(nodeid, [&connman](CNode *pfrom) { bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 1; if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { // As per BIP152, we only get 3 of our peers to announce // blocks using compact encodings. connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode *pnodeStop) { connman.PushMessage( pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()) .Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); return true; }); lNodesAnnouncingHeaderAndIDs.pop_front(); } fAnnounceUsingCMPCTBLOCK = true; connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()) .Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); return true; }); } // Requires cs_main bool CanDirectFetch(const Consensus::Params &consensusParams) { return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20; } // Requires cs_main bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) { if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) { return true; } if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) { return true; } return false; } /** * Update pindexLastCommonBlock and add not-in-flight missing successors to * vBlocks, until it has at most count entries. */ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector &vBlocks, NodeId &nodeStaller, const Consensus::Params &consensusParams) { if (count == 0) { return; } vBlocks.reserve(vBlocks.size() + count); CNodeState *state = State(nodeid); assert(state != nullptr); // Make sure pindexBestKnownBlock is up to date, we'll need it. ProcessBlockAvailability(nodeid); if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork) { // This peer has nothing interesting. return; } if (state->pindexLastCommonBlock == nullptr) { // Bootstrap quickly by guessing a parent of our best tip is the forking // point. Guessing wrong in either direction is not a problem. state->pindexLastCommonBlock = chainActive[std::min( state->pindexBestKnownBlock->nHeight, chainActive.Height())]; } // If the peer reorganized, our previous pindexLastCommonBlock may not be an // ancestor of its current tip anymore. Go back enough to fix that. state->pindexLastCommonBlock = LastCommonAncestor( state->pindexLastCommonBlock, state->pindexBestKnownBlock); if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) { return; } std::vector vToFetch; const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the best block we know the peer has, or more // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in // common with this peer. 
The +1 is so we can detect stalling, namely if we // would be able to download that next block if the window were 1 larger. int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; int nMaxHeight = std::min(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); NodeId waitingfor = -1; while (pindexWalk->nHeight < nMaxHeight) { // Read up to 128 (or more, if more blocks than that are needed) // successors of pindexWalk (towards pindexBestKnownBlock) into // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as // expensive as iterating over ~100 CBlockIndex* entries anyway. int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max(count - vBlocks.size(), 128)); vToFetch.resize(nToFetch); pindexWalk = state->pindexBestKnownBlock->GetAncestor( pindexWalk->nHeight + nToFetch); vToFetch[nToFetch - 1] = pindexWalk; for (unsigned int i = nToFetch - 1; i > 0; i--) { vToFetch[i - 1] = vToFetch[i]->pprev; } // Iterate over those blocks in vToFetch (in forward direction), adding // the ones that are not yet downloaded and not in flight to vBlocks. In // the mean time, update pindexLastCommonBlock as long as all ancestors // are already downloaded, or if it's already part of our chain (and // therefore don't need it even if pruned). for (const CBlockIndex *pindex : vToFetch) { if (!pindex->IsValid(BLOCK_VALID_TREE)) { // We consider the chain that this peer is on invalid. return; } if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) { if (pindex->nChainTx) { state->pindexLastCommonBlock = pindex; } } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) { // The block is not already downloaded, and not yet in flight. if (pindex->nHeight > nWindowEnd) { // We reached the end of the window. if (vBlocks.size() == 0 && waitingfor != nodeid) { // We aren't able to fetch anything, but we would be if // the download window was one larger. nodeStaller = waitingfor; } return; } vBlocks.push_back(pindex); if (vBlocks.size() == count) { return; } } else if (waitingfor == -1) { // This is the first already-in-flight block. waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first; } } } } } // namespace bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) { LOCK(cs_main); CNodeState *state = State(nodeid); if (state == nullptr) { return false; } stats.nMisbehavior = state->nMisbehavior; stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; stats.nCommonHeight = state->pindexLastCommonBlock ? 
state->pindexLastCommonBlock->nHeight : -1; for (const QueuedBlock &queue : state->vBlocksInFlight) { if (queue.pindex) { stats.vHeightInFlight.push_back(queue.pindex->nHeight); } } return true; } void RegisterNodeSignals(CNodeSignals &nodeSignals) { nodeSignals.ProcessMessages.connect(&ProcessMessages); nodeSignals.SendMessages.connect(&SendMessages); nodeSignals.InitializeNode.connect(&InitializeNode); nodeSignals.FinalizeNode.connect(&FinalizeNode); } void UnregisterNodeSignals(CNodeSignals &nodeSignals) { nodeSignals.ProcessMessages.disconnect(&ProcessMessages); nodeSignals.SendMessages.disconnect(&SendMessages); nodeSignals.InitializeNode.disconnect(&InitializeNode); nodeSignals.FinalizeNode.disconnect(&FinalizeNode); } ////////////////////////////////////////////////////////////////////////////// // // mapOrphanTransactions // void AddToCompactExtraTransactions(const CTransactionRef &tx) { size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN); if (max_extra_txn <= 0) { return; } if (!vExtraTxnForCompact.size()) { vExtraTxnForCompact.resize(max_extra_txn); } vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetId(), tx); vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn; } bool AddOrphanTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { const uint256 &txid = tx->GetId(); if (mapOrphanTransactions.count(txid)) { return false; } // Ignore big transactions, to avoid a send-big-orphans memory exhaustion // attack. If a peer has a legitimate large transaction with a missing // parent then we assume it will rebroadcast it later, after the parent // transaction(s) have been mined or received. // 100 orphans, each of which is at most 99,999 bytes big is at most 10 // megabytes of orphans and somewhat more byprev index (in the worst case): unsigned int sz = tx->GetTotalSize(); if (sz >= MAX_STANDARD_TX_SIZE) { LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, txid.ToString()); return false; } auto ret = mapOrphanTransactions.emplace( txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME}); assert(ret.second); for (const CTxIn &txin : tx->vin) { mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first); } AddToCompactExtraTransactions(tx); LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", txid.ToString(), mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size()); return true; } static int EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::map::iterator it = mapOrphanTransactions.find(hash); if (it == mapOrphanTransactions.end()) { return 0; } for (const CTxIn &txin : it->second.tx->vin) { auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout); if (itPrev == mapOrphanTransactionsByPrev.end()) { continue; } itPrev->second.erase(it); if (itPrev->second.empty()) { mapOrphanTransactionsByPrev.erase(itPrev); } } mapOrphanTransactions.erase(it); return 1; } void EraseOrphansFor(NodeId peer) { int nErased = 0; std::map::iterator iter = mapOrphanTransactions.begin(); while (iter != mapOrphanTransactions.end()) { // Increment to avoid iterator becoming invalid. 
std::map::iterator maybeErase = iter++; if (maybeErase->second.fromPeer == peer) { nErased += EraseOrphanTx(maybeErase->second.tx->GetId()); } } if (nErased > 0) { LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer); } } unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { unsigned int nEvicted = 0; static int64_t nNextSweep; int64_t nNow = GetTime(); if (nNextSweep <= nNow) { // Sweep out expired orphan pool entries: int nErased = 0; int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; std::map::iterator iter = mapOrphanTransactions.begin(); while (iter != mapOrphanTransactions.end()) { std::map::iterator maybeErase = iter++; if (maybeErase->second.nTimeExpire <= nNow) { nErased += EraseOrphanTx(maybeErase->second.tx->GetId()); } else { nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); } } // Sweep again 5 minutes after the next entry that expires in order to // batch the linear scan. nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL; if (nErased > 0) { LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased); } } while (mapOrphanTransactions.size() > nMaxOrphans) { // Evict a random orphan: uint256 randomhash = GetRandHash(); std::map::iterator it = mapOrphanTransactions.lower_bound(randomhash); if (it == mapOrphanTransactions.end()) { it = mapOrphanTransactions.begin(); } EraseOrphanTx(it->first); ++nEvicted; } return nEvicted; } // Requires cs_main. void Misbehaving(NodeId pnode, int howmuch, const std::string &reason) { if (howmuch == 0) { return; } CNodeState *state = State(pnode); if (state == nullptr) { return; } state->nMisbehavior += howmuch; int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD); if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore) { LogPrintf( "%s: %s peer=%d (%d -> %d) reason: %s BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior - howmuch, state->nMisbehavior, reason.c_str()); state->fShouldBan = true; } else { LogPrintf("%s: %s peer=%d (%d -> %d) reason: %s\n", __func__, state->name, pnode, state->nMisbehavior - howmuch, state->nMisbehavior, reason.c_str()); } } // overloaded variant of above to operate on CNode*s static void Misbehaving(CNode *node, int howmuch, const std::string &reason) { Misbehaving(node->GetId(), howmuch, reason); } ////////////////////////////////////////////////////////////////////////////// // // blockchain -> download logic notification // PeerLogicValidation::PeerLogicValidation(CConnman *connmanIn) : connman(connmanIn) { // Initialize global variables that cannot be constructed at startup. recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); } void PeerLogicValidation::BlockConnected( const std::shared_ptr &pblock, const CBlockIndex *pindex, const std::vector &vtxConflicted) { LOCK(cs_main); std::vector vOrphanErase; for (const CTransactionRef &ptx : pblock->vtx) { const CTransaction &tx = *ptx; // Which orphan pool entries must we evict? 
for (size_t j = 0; j < tx.vin.size(); j++) { auto itByPrev = mapOrphanTransactionsByPrev.find(tx.vin[j].prevout); if (itByPrev == mapOrphanTransactionsByPrev.end()) { continue; } for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { const CTransaction &orphanTx = *(*mi)->second.tx; const uint256 &orphanHash = orphanTx.GetHash(); vOrphanErase.push_back(orphanHash); } } } // Erase orphan transactions include or precluded by this block if (vOrphanErase.size()) { int nErased = 0; for (uint256 &orphanId : vOrphanErase) { nErased += EraseOrphanTx(orphanId); } LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); } } static CCriticalSection cs_most_recent_block; static std::shared_ptr most_recent_block; static std::shared_ptr most_recent_compact_block; static uint256 most_recent_block_hash; void PeerLogicValidation::NewPoWValidBlock( const CBlockIndex *pindex, const std::shared_ptr &pblock) { std::shared_ptr pcmpctblock = std::make_shared(*pblock); const CNetMsgMaker msgMaker(PROTOCOL_VERSION); LOCK(cs_main); static int nHighestFastAnnounce = 0; if (pindex->nHeight <= nHighestFastAnnounce) { return; } nHighestFastAnnounce = pindex->nHeight; uint256 hashBlock(pblock->GetHash()); { LOCK(cs_most_recent_block); most_recent_block_hash = hashBlock; most_recent_block = pblock; most_recent_compact_block = pcmpctblock; } connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, &hashBlock](CNode *pnode) { // TODO: Avoid the repeated-serialization here if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) { return; } ProcessBlockAvailability(pnode->GetId()); CNodeState &state = *State(pnode->GetId()); // If the peer has, or we announced to them the previous block already, // but we don't think they have this one, go ahead and announce it. if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) { LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock", hashBlock.ToString(), pnode->id); connman->PushMessage( pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock)); state.pindexBestHeaderSent = pindex; } }); } void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) { const int nNewHeight = pindexNew->nHeight; connman->SetBestHeight(nNewHeight); if (!fInitialDownload) { // Find the hashes of all blocks that weren't previously in the best // chain. std::vector vHashes; const CBlockIndex *pindexToAnnounce = pindexNew; while (pindexToAnnounce != pindexFork) { vHashes.push_back(pindexToAnnounce->GetBlockHash()); pindexToAnnounce = pindexToAnnounce->pprev; if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { // Limit announcements in case of a huge reorganization. Rely on // the peer's synchronization mechanism in that case. break; } } // Relay inventory, but don't relay old inventory during initial block // download. connman->ForEachNode([nNewHeight, &vHashes](CNode *pnode) { if (nNewHeight > (pnode->nStartingHeight != -1 ? 
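// NewPoWValidBlock above stashes the freshly validated block (and its compact
// form) under cs_most_recent_block, a lock separate from cs_main, so other
// handlers can read the latest block without taking the validation lock. A
// small sketch of that "latest value behind its own mutex" pattern, with a
// string standing in for CBlock.
#include <memory>
#include <mutex>
#include <string>

using Block = std::string;  // stand-in; the real cache also stores the compact form and hash

class MostRecentBlockCache {
public:
    void Set(std::shared_ptr<const Block> block) {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_block = std::move(block);
    }
    std::shared_ptr<const Block> Get() const {
        std::lock_guard<std::mutex> lock(m_mutex);
        return m_block;  // the shared_ptr copy keeps the block alive after unlock
    }

private:
    mutable std::mutex m_mutex;
    std::shared_ptr<const Block> m_block;
};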
pnode->nStartingHeight - 2000 : 0)) { for (const uint256 &hash : boost::adaptors::reverse(vHashes)) { pnode->PushBlockHash(hash); } } }); connman->WakeMessageHandler(); } nTimeBestReceived = GetTime(); } void PeerLogicValidation::BlockChecked(const CBlock &block, const CValidationState &state) { LOCK(cs_main); const uint256 hash(block.GetHash()); std::map>::iterator it = mapBlockSource.find(hash); int nDoS = 0; if (state.IsInvalid(nDoS)) { // Don't send reject message with code 0 or an internal reject code. if (it != mapBlockSource.end() && State(it->second.first) && state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) { CBlockReject reject = { uint8_t(state.GetRejectCode()), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash}; State(it->second.first)->rejects.push_back(reject); if (nDoS > 0 && it->second.second) { Misbehaving(it->second.first, nDoS, state.GetRejectReason()); } } } // Check that: // 1. The block is valid // 2. We're not in initial block download // 3. This is currently the best block we're aware of. We haven't updated // the tip yet so we have no way to check this directly here. Instead we // just check that there are currently no other blocks in flight. else if (state.IsValid() && !IsInitialBlockDownload() && mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { if (it != mapBlockSource.end()) { MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, *connman); } } if (it != mapBlockSource.end()) { mapBlockSource.erase(it); } } ////////////////////////////////////////////////////////////////////////////// // // Messages // static bool AlreadyHave(const CInv &inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { switch (inv.type) { case MSG_TX: { assert(recentRejects); if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip) { // If the chain tip has changed previously rejected transactions // might be now valid, e.g. due to a nLockTime'd tx becoming // valid, or a double-spend. Reset the rejects filter and give // those txs a second chance. hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash(); recentRejects->reset(); } // Use pcoinsTip->HaveCoinInCache as a quick approximation to // exclude requesting or processing some txs which have already been // included in a block. As this is best effort, we only check for // output 0 and 1. This works well enough in practice and we get // diminishing returns with 2 onward. return recentRejects->contains(inv.hash) || mempool.exists(inv.hash) || mapOrphanTransactions.count(inv.hash) || pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1)); } case MSG_BLOCK: return mapBlockIndex.count(inv.hash); } // Don't know what it is, just say we already got one return true; } static void RelayTransaction(const CTransaction &tx, CConnman &connman) { CInv inv(MSG_TX, tx.GetId()); connman.ForEachNode([&inv](CNode *pnode) { pnode->PushInventory(inv); }); } static void RelayAddress(const CAddress &addr, bool fReachable, CConnman &connman) { // Limited relaying of addresses outside our network(s) unsigned int nRelayNodes = fReachable ? 2 : 1; // Relay to a limited number of other nodes. // Use deterministic randomness to send to the same nodes for 24 hours at a // time so the addrKnowns of the chosen nodes prevent repeats. 
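// AlreadyHave above clears recentRejects whenever the chain tip has moved,
// because a transaction rejected under the old tip (e.g. an nLockTime'd tx)
// may be valid now. A sketch of that "cache keyed to the current tip" idea;
// an unordered_set stands in for the rolling bloom filter and strings for
// uint256 hashes.
#include <string>
#include <unordered_set>

using Hash = std::string;

class RecentRejects {
public:
    bool Contains(const Hash &txid, const Hash &currentTip) {
        MaybeReset(currentTip);
        return m_rejects.count(txid) != 0;
    }
    void Insert(const Hash &txid, const Hash &currentTip) {
        MaybeReset(currentTip);
        m_rejects.insert(txid);
    }

private:
    void MaybeReset(const Hash &currentTip) {
        if (currentTip != m_tipAtLastReset) {
            // Tip changed: previously rejected txs get a second chance.
            m_rejects.clear();
            m_tipAtLastReset = currentTip;
        }
    }
    Hash m_tipAtLastReset;
    std::unordered_set<Hash> m_rejects;
};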
uint64_t hashAddr = addr.GetHash(); const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) .Write(hashAddr << 32) .Write((GetTime() + hashAddr) / (24 * 60 * 60)); FastRandomContext insecure_rand; std::array, 2> best{ {{0, nullptr}, {0, nullptr}}}; assert(nRelayNodes <= best.size()); auto sortfunc = [&best, &hasher, nRelayNodes](CNode *pnode) { if (pnode->nVersion >= CADDR_TIME_VERSION) { uint64_t hashKey = CSipHasher(hasher).Write(pnode->id).Finalize(); for (unsigned int i = 0; i < nRelayNodes; i++) { if (hashKey > best[i].first) { std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1); best[i] = std::make_pair(hashKey, pnode); break; } } } }; auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] { for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { best[i].second->PushAddress(addr, insecure_rand); } }; connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc)); } static void ProcessGetData(const Config &config, CNode *pfrom, const Consensus::Params &consensusParams, CConnman &connman, const std::atomic &interruptMsgProc) { std::deque::iterator it = pfrom->vRecvGetData.begin(); std::vector vNotFound; const CNetMsgMaker msgMaker(pfrom->GetSendVersion()); LOCK(cs_main); while (it != pfrom->vRecvGetData.end()) { // Don't bother if send buffer is too full to respond anyway. if (pfrom->fPauseSend) { break; } const CInv &inv = *it; { if (interruptMsgProc) { return; } it++; if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) { bool send = false; BlockMap::iterator mi = mapBlockIndex.find(inv.hash); if (mi != mapBlockIndex.end()) { if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) && mi->second->IsValid(BLOCK_VALID_TREE)) { // If we have the block and all of its parents, but have // not yet validated it, we might be in the middle of // connecting it (ie in the unlock of cs_main before // ActivateBestChain but after AcceptBlock). In this // case, we need to run ActivateBestChain prior to // checking the relay conditions below. std::shared_ptr a_recent_block; { LOCK(cs_most_recent_block); a_recent_block = most_recent_block; } CValidationState dummy; ActivateBestChain(config, dummy, a_recent_block); } if (chainActive.Contains(mi->second)) { send = true; } else { static const int nOneMonth = 30 * 24 * 60 * 60; // To prevent fingerprinting attacks, only send blocks // outside of the active chain if they are valid, and no // more than a month older (both in time, and in best // equivalent proof of work) than the best header chain // we know about. send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) && (GetBlockProofEquivalentTime( *pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth); if (!send) { LogPrintf("%s: ignoring request from peer=%i for " "old block that isn't in the main " "chain\n", __func__, pfrom->GetId()); } } } // Disconnect node in case we have reached the outbound limit // for serving historical blocks never disconnect whitelisted // nodes. 
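// RelayAddress above chooses its one or two relay targets deterministically:
// each candidate peer is scored with a keyed hash over the address, a 24-hour
// time bucket, and the peer id, and the highest-scoring peers win, so the
// same address maps to the same peers for a whole day. A self-contained
// sketch of that selection; std::hash is a dependency-free stand-in for the
// node's SipHash-based randomizer, and Peer is a simplified type.
#include <algorithm>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

struct Peer { int id; };

std::vector<Peer> PickRelayTargets(const std::vector<Peer> &peers,
                                   const std::string &addr,
                                   int64_t nowSeconds, size_t nTargets) {
    const uint64_t dayBucket = static_cast<uint64_t>(nowSeconds) / (24 * 60 * 60);
    std::vector<std::pair<uint64_t, Peer>> scored;
    scored.reserve(peers.size());
    for (const Peer &p : peers) {
        uint64_t score =
            std::hash<std::string>{}(addr + '|' + std::to_string(dayBucket) +
                                     '|' + std::to_string(p.id));
        scored.emplace_back(score, p);
    }
    std::sort(scored.begin(), scored.end(),
              [](const std::pair<uint64_t, Peer> &a,
                 const std::pair<uint64_t, Peer> &b) { return a.first > b.first; });
    std::vector<Peer> best;
    for (size_t i = 0; i < scored.size() && i < nTargets; ++i) {
        best.push_back(scored[i].second);
    }
    return best;
}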
// assume > 1 week = historical static const int nOneWeek = 7 * 24 * 60 * 60; if (send && connman.OutboundTargetReached(true) && (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted) { LogPrint(BCLog::NET, "historical block serving limit " "reached, disconnect peer=%d\n", pfrom->GetId()); // disconnect node pfrom->fDisconnect = true; send = false; } // Pruned nodes may have deleted the block, so check whether // it's available before trying to send. if (send && (mi->second->nStatus & BLOCK_HAVE_DATA)) { // Send block from disk CBlock block; if (!ReadBlockFromDisk(block, (*mi).second, config)) { assert(!"cannot load block from disk"); } if (inv.type == MSG_BLOCK) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::BLOCK, block)); } else if (inv.type == MSG_FILTERED_BLOCK) { bool sendMerkleBlock = false; CMerkleBlock merkleBlock; { LOCK(pfrom->cs_filter); if (pfrom->pfilter) { sendMerkleBlock = true; merkleBlock = CMerkleBlock(block, *pfrom->pfilter); } } if (sendMerkleBlock) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock)); // CMerkleBlock just contains hashes, so also push // any transactions in the block the client did not // see. This avoids hurting performance by // pointlessly requiring a round-trip. Note that // there is currently no way for a node to request // any single transactions we didn't send here - // they must either disconnect and retry or request // the full block. Thus, the protocol spec specified // allows for us to provide duplicate txn here, // however we MUST always provide at least what the // remote peer needs. typedef std::pair PairType; for (PairType &pair : merkleBlock.vMatchedTxn) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::TX, *block.vtx[pair.first])); } } // else // no response } else if (inv.type == MSG_CMPCT_BLOCK) { // If a peer is asking for old blocks, we're almost // guaranteed they won't have a useful mempool to match // against a compact block, and we don't feel like // constructing the object for them, so instead we // respond with the full, non-compact block. int nSendFlags = 0; if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) { CBlockHeaderAndShortTxIDs cmpctblock(block); connman.PushMessage( pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } else { connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, block)); } } // Trigger the peer node to send a getblocks request for the // next batch of inventory. if (inv.hash == pfrom->hashContinue) { // Bypass PushInventory, this must send even if // redundant, and we want it right after the last block // so they don't wait for other stuff first. std::vector vInv; vInv.push_back( CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash())); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::INV, vInv)); pfrom->hashContinue.SetNull(); } } } else if (inv.type == MSG_TX) { // Send stream from relay memory bool push = false; auto mi = mapRelay.find(inv.hash); int nSendFlags = 0; if (mi != mapRelay.end()) { connman.PushMessage( pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second)); push = true; } else if (pfrom->timeLastMempoolReq) { auto txinfo = mempool.info(inv.hash); // To protect privacy, do not answer getdata using the // mempool when that TX couldn't have been INVed in reply to // a MEMPOOL request. 
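// The historical-block serving limit above refuses to serve blocks more than
// a week behind the best known header (or filtered-block requests) once the
// outbound bandwidth target is reached, unless the peer is whitelisted. A
// compact sketch of that decision; parameter names are illustrative, not the
// node's actual API.
#include <cstdint>

bool RefuseHistoricalBlock(bool outboundTargetReached, bool peerWhitelisted,
                           int64_t bestHeaderTime, int64_t blockTime,
                           bool filteredBlockRequested) {
    static const int64_t ONE_WEEK = 7 * 24 * 60 * 60;
    if (peerWhitelisted || !outboundTargetReached) {
        return false;
    }
    const bool historical = (bestHeaderTime - blockTime) > ONE_WEEK;
    return historical || filteredBlockRequested;
}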
if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) { connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx)); push = true; } } if (!push) { vNotFound.push_back(inv); } } // Track requests for our stuff. GetMainSignals().Inventory(inv.hash); if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) { break; } } } pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it); if (!vNotFound.empty()) { // Let the peer know that we didn't find what it asked for, so it // doesn't have to wait around forever. Currently only SPV clients // actually care about this message: it's needed when they are // recursively walking the dependencies of relevant unconfirmed // transactions. SPV clients want to do that because they want to know // about (and store and rebroadcast and risk analyze) the dependencies // of transactions relevant to them, without having to download the // entire memory pool. connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); } } uint32_t GetFetchFlags(CNode *pfrom, const CBlockIndex *pprev, const Consensus::Params &chainparams) { uint32_t nFetchFlags = 0; return nFetchFlags; } inline static void SendBlockTransactions(const CBlock &block, const BlockTransactionsRequest &req, CNode *pfrom, CConnman &connman) { BlockTransactions resp(req); for (size_t i = 0; i < req.indexes.size(); i++) { if (req.indexes[i] >= block.vtx.size()) { LOCK(cs_main); Misbehaving(pfrom, 100, "out-of-bound-tx-index"); LogPrintf( "Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->id); return; } resp.txn[i] = block.vtx[req.indexes[i]]; } LOCK(cs_main); const CNetMsgMaker msgMaker(pfrom->GetSendVersion()); int nSendFlags = 0; connman.PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp)); } static bool ProcessMessage(const Config &config, CNode *pfrom, const std::string &strCommand, CDataStream &vRecv, int64_t nTimeReceived, const CChainParams &chainparams, CConnman &connman, const std::atomic &interruptMsgProc) { LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->id); if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0) { LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n"); return true; } if (!(pfrom->GetLocalServices() & NODE_BLOOM) && (strCommand == NetMsgType::FILTERLOAD || strCommand == NetMsgType::FILTERADD)) { if (pfrom->nVersion >= NO_BLOOM_VERSION) { LOCK(cs_main); Misbehaving(pfrom, 100, "no-bloom-version"); return false; } else { pfrom->fDisconnect = true; return false; } } if (strCommand == NetMsgType::REJECT) { if (LogAcceptCategory(BCLog::NET)) { try { std::string strMsg; uint8_t ccode; std::string strReason; vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH); std::ostringstream ss; ss << strMsg << " code " << itostr(ccode) << ": " << strReason; if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX) { uint256 hash; vRecv >> hash; ss << ": hash " << hash.ToString(); } LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str())); } catch (const std::ios_base::failure &) { // Avoid feedback loops by preventing reject messages from // triggering a new reject message. 
LogPrint(BCLog::NET, "Unparseable reject message received\n"); } } } else if (strCommand == NetMsgType::VERSION) { // Each connection can only send one version message if (pfrom->nVersion != 0) { connman.PushMessage( pfrom, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, std::string("Duplicate version message"))); LOCK(cs_main); Misbehaving(pfrom, 1, "multiple-version"); return false; } int64_t nTime; CAddress addrMe; CAddress addrFrom; uint64_t nNonce = 1; uint64_t nServiceInt; ServiceFlags nServices; int nVersion; int nSendVersion; std::string strSubVer; std::string cleanSubVer; int nStartingHeight = -1; bool fRelay = true; vRecv >> nVersion >> nServiceInt >> nTime >> addrMe; nSendVersion = std::min(nVersion, PROTOCOL_VERSION); nServices = ServiceFlags(nServiceInt); if (!pfrom->fInbound) { connman.SetServices(pfrom->addr, nServices); } if (pfrom->nServicesExpected & ~nServices) { LogPrint(BCLog::NET, "peer=%d does not offer the expected services " "(%08x offered, %08x expected); " "disconnecting\n", pfrom->id, nServices, pfrom->nServicesExpected); connman.PushMessage( pfrom, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD, strprintf("Expected to offer services %08x", pfrom->nServicesExpected))); pfrom->fDisconnect = true; return false; } if (nVersion < MIN_PEER_PROTO_VERSION) { // disconnect from peers older than this proto version LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->id, nVersion); connman.PushMessage( pfrom, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE, strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION))); pfrom->fDisconnect = true; return false; } if (!vRecv.empty()) { vRecv >> addrFrom >> nNonce; } if (!vRecv.empty()) { vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH); cleanSubVer = SanitizeString(strSubVer); } if (!vRecv.empty()) { vRecv >> nStartingHeight; } if (!vRecv.empty()) { vRecv >> fRelay; } // Disconnect if we connected to ourself if (pfrom->fInbound && !connman.CheckIncomingNonce(nNonce)) { LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString()); pfrom->fDisconnect = true; return true; } if (pfrom->fInbound && addrMe.IsRoutable()) { SeenLocal(addrMe); } // Be shy and don't send version until we hear if (pfrom->fInbound) { PushNodeVersion(config, pfrom, connman, GetAdjustedTime()); } connman.PushMessage( pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK)); pfrom->nServices = nServices; pfrom->SetAddrLocal(addrMe); { LOCK(pfrom->cs_SubVer); pfrom->strSubVer = strSubVer; pfrom->cleanSubVer = cleanSubVer; } pfrom->nStartingHeight = nStartingHeight; pfrom->fClient = !(nServices & NODE_NETWORK); { LOCK(pfrom->cs_filter); // set to true after we get the first filter* message pfrom->fRelayTxes = fRelay; } // Change version pfrom->SetSendVersion(nSendVersion); pfrom->nVersion = nVersion; // Potentially mark this peer as a preferred download peer. 
{ LOCK(cs_main); UpdatePreferredDownload(pfrom, State(pfrom->GetId())); } if (!pfrom->fInbound) { // Advertise our address if (fListen && !IsInitialBlockDownload()) { CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices()); FastRandomContext insecure_rand; if (addr.IsRoutable()) { LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString()); pfrom->PushAddress(addr, insecure_rand); } else if (IsPeerAddrLocalGood(pfrom)) { addr.SetIP(addrMe); LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString()); pfrom->PushAddress(addr, insecure_rand); } } // Get recent addresses if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman.GetAddressCount() < 1000) { connman.PushMessage( pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR)); pfrom->fGetAddr = true; } connman.MarkAddressGood(pfrom->addr); } std::string remoteAddr; if (fLogIPs) { remoteAddr = ", peeraddr=" + pfrom->addr.ToString(); } LogPrintf("receive version message: [%s] %s: version %d, blocks=%d, " "us=%s, peer=%d%s\n", pfrom->addr.ToString().c_str(), cleanSubVer, pfrom->nVersion, pfrom->nStartingHeight, addrMe.ToString(), pfrom->id, remoteAddr); int64_t nTimeOffset = nTime - GetTime(); pfrom->nTimeOffset = nTimeOffset; AddTimeData(pfrom->addr, nTimeOffset); // If the peer is old enough to have the old alert system, send it the // final alert. if (pfrom->nVersion <= 70012) { CDataStream finalAlert( ParseHex("60010000000000000000000000ffffff7f00000000ffffff7ffef" "fff7f01ffffff7f00000000ffffff7f00ffffff7f002f55524745" "4e543a20416c657274206b657920636f6d70726f6d697365642c2" "075706772616465207265717569726564004630440220653febd6" "410f470f6bae11cad19c48413becb1ac2c17f908fd0fd53bdc3ab" "d5202206d0e9c96fe88d4a0f01ed9dedae2b6f9e00da94cad0fec" "aae66ecf689bf71b50"), SER_NETWORK, PROTOCOL_VERSION); connman.PushMessage( pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert)); } // Feeler connections exist only to verify if address is online. if (pfrom->fFeeler) { assert(pfrom->fInbound == false); pfrom->fDisconnect = true; } return true; } else if (pfrom->nVersion == 0) { // Must have a version message before anything else LOCK(cs_main); Misbehaving(pfrom, 1, "missing-version"); return false; } // At this point, the outgoing message serialization version can't change. const CNetMsgMaker msgMaker(pfrom->GetSendVersion()); if (strCommand == NetMsgType::VERACK) { pfrom->SetRecvVersion( std::min(pfrom->nVersion.load(), PROTOCOL_VERSION)); if (!pfrom->fInbound) { // Mark this node as currently connected, so we update its timestamp // later. LOCK(cs_main); State(pfrom->GetId())->fCurrentlyConnected = true; } if (pfrom->nVersion >= SENDHEADERS_VERSION) { // Tell our peer we prefer to receive headers rather than inv's // We send this to non-NODE NETWORK peers as well, because even // non-NODE NETWORK peers can announce blocks (such as pruning // nodes) connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS)); } if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) { // Tell our peer we are willing to provide version 1 or 2 // cmpctblocks. However, we do not request new block announcements // using cmpctblock messages. We send this to non-NODE NETWORK peers // as well, because they may wish to request compact blocks from us. 
bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 1; connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); } pfrom->fSuccessfullyConnected = true; } else if (!pfrom->fSuccessfullyConnected) { // Must have a verack message before anything else LOCK(cs_main); Misbehaving(pfrom, 1, "missing-verack"); return false; } else if (strCommand == NetMsgType::ADDR) { std::vector vAddr; vRecv >> vAddr; // Don't want addr from older versions unless seeding if (pfrom->nVersion < CADDR_TIME_VERSION && connman.GetAddressCount() > 1000) { return true; } if (vAddr.size() > 1000) { LOCK(cs_main); Misbehaving(pfrom, 20, "oversized-addr"); return error("message addr size() = %u", vAddr.size()); } // Store the new addresses std::vector vAddrOk; int64_t nNow = GetAdjustedTime(); int64_t nSince = nNow - 10 * 60; for (CAddress &addr : vAddr) { if (interruptMsgProc) { return true; } if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES) { continue; } if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) { addr.nTime = nNow - 5 * 24 * 60 * 60; } pfrom->AddAddressKnown(addr); bool fReachable = IsReachable(addr); if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable()) { // Relay to a limited number of other nodes RelayAddress(addr, fReachable, connman); } // Do not store addresses outside our network if (fReachable) { vAddrOk.push_back(addr); } } connman.AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60); if (vAddr.size() < 1000) { pfrom->fGetAddr = false; } if (pfrom->fOneShot) { pfrom->fDisconnect = true; } } else if (strCommand == NetMsgType::SENDHEADERS) { LOCK(cs_main); State(pfrom->GetId())->fPreferHeaders = true; } else if (strCommand == NetMsgType::SENDCMPCT) { bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 0; vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion; if (nCMPCTBLOCKVersion == 1) { LOCK(cs_main); // fProvidesHeaderAndIDs is used to "lock in" version of compact // blocks we send. if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) { State(pfrom->GetId())->fProvidesHeaderAndIDs = true; } State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK; if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) { State(pfrom->GetId())->fSupportsDesiredCmpctVersion = true; } } } else if (strCommand == NetMsgType::INV) { std::vector vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { LOCK(cs_main); Misbehaving(pfrom, 20, "oversized-inv"); return error("message inv size() = %u", vInv.size()); } bool fBlocksOnly = !fRelayTxes; // Allow whitelisted peers to send data other than blocks in blocks only // mode if whitelistrelay is true if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) { fBlocksOnly = false; } LOCK(cs_main); uint32_t nFetchFlags = GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()); std::vector vToFetch; for (size_t nInv = 0; nInv < vInv.size(); nInv++) { CInv &inv = vInv[nInv]; if (interruptMsgProc) { return true; } bool fAlreadyHave = AlreadyHave(inv); LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? 
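// The ADDR handler above sanity-checks each address timestamp: values that
// are absurdly old (unix time <= 100000000) or more than 10 minutes in the
// future are rewritten to "5 days ago", so the address is kept without being
// treated as freshly seen. A tiny sketch of that clamp.
#include <cstdint>

int64_t SanitizeAddrTime(int64_t addrTime, int64_t now) {
    const int64_t TEN_MINUTES = 10 * 60;
    const int64_t FIVE_DAYS = 5 * 24 * 60 * 60;
    if (addrTime <= 100000000 || addrTime > now + TEN_MINUTES) {
        return now - FIVE_DAYS;
    }
    return addrTime;
}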
"have" : "new", pfrom->id); if (inv.type == MSG_TX) { inv.type |= nFetchFlags; } if (inv.type == MSG_BLOCK) { UpdateBlockAvailability(pfrom->GetId(), inv.hash); if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) { // We used to request the full block here, but since // headers-announcements are now the primary method of // announcement on the network, and since, in the case that // a node fell back to inv we probably have a reorg which we // should get the headers for first, we now only provide a // getheaders response here. When we receive the headers, we // will then ask for the blocks we need. connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash)); LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->id); } } else { pfrom->AddInventoryKnown(inv); if (fBlocksOnly) { LogPrint(BCLog::NET, "transaction (%s) inv sent in " "violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->id); } else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) { pfrom->AskFor(inv); } } // Track requests for our stuff GetMainSignals().Inventory(inv.hash); } if (!vToFetch.empty()) { connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vToFetch)); } } else if (strCommand == NetMsgType::GETDATA) { std::vector vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { LOCK(cs_main); Misbehaving(pfrom, 20, "too-many-inv"); return error("message getdata size() = %u", vInv.size()); } LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->id); if (vInv.size() > 0) { LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->id); } pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end()); ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman, interruptMsgProc); } else if (strCommand == NetMsgType::GETBLOCKS) { CBlockLocator locator; uint256 hashStop; vRecv >> locator >> hashStop; // We might have announced the currently-being-connected tip using a // compact block, which resulted in the peer sending a getblocks // request, which we would otherwise respond to without the new block. // To avoid this situation we simply verify that we are on our best // known chain now. This is super overkill, but we handle it better // for getheaders requests, and there are no known nodes which support // compact blocks but still use getblocks to request blocks. { std::shared_ptr a_recent_block; { LOCK(cs_most_recent_block); a_recent_block = most_recent_block; } CValidationState dummy; ActivateBestChain(config, dummy, a_recent_block); } LOCK(cs_main); // Find the last block the caller has in the main chain const CBlockIndex *pindex = FindForkInGlobalIndex(chainActive, locator); // Send the rest of the chain if (pindex) { pindex = chainActive.Next(pindex); } int nLimit = 500; LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->id); for (; pindex; pindex = chainActive.Next(pindex)) { if (pindex->GetBlockHash() == hashStop) { LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } // If pruning, don't inv blocks unless we have on disk and are // likely to still have for some reasonable time window (1 hour) // that block relay might require. 
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing; if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave)) { LogPrint( BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash())); if (--nLimit <= 0) { // When this block is requested, we'll send an inv that'll // trigger the peer to getblocks the next batch of inventory. LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); pfrom->hashContinue = pindex->GetBlockHash(); break; } } } else if (strCommand == NetMsgType::GETBLOCKTXN) { BlockTransactionsRequest req; vRecv >> req; std::shared_ptr recent_block; { LOCK(cs_most_recent_block); if (most_recent_block_hash == req.blockhash) { recent_block = most_recent_block; } // Unlock cs_most_recent_block to avoid cs_main lock inversion } if (recent_block) { SendBlockTransactions(*recent_block, req, pfrom, connman); return true; } LOCK(cs_main); BlockMap::iterator it = mapBlockIndex.find(req.blockhash); if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) { LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id); return true; } if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) { // If an older block is requested (should never happen in practice, // but can happen in tests) send a block response instead of a // blocktxn response. Sending a full block response instead of a // small blocktxn response is preferable in the case where a peer // might maliciously send lots of getblocktxn requests to trigger // expensive disk reads, because it will require the peer to // actually receive all the data read from disk over the network. LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->id, MAX_BLOCKTXN_DEPTH); CInv inv; inv.type = MSG_BLOCK; inv.hash = req.blockhash; pfrom->vRecvGetData.push_back(inv); ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman, interruptMsgProc); return true; } CBlock block; bool ret = ReadBlockFromDisk(block, it->second, config); assert(ret); SendBlockTransactions(block, req, pfrom, connman); } else if (strCommand == NetMsgType::GETHEADERS) { CBlockLocator locator; uint256 hashStop; vRecv >> locator >> hashStop; LOCK(cs_main); if (IsInitialBlockDownload() && !pfrom->fWhitelisted) { LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because " "node is in initial block download\n", pfrom->id); return true; } CNodeState *nodestate = State(pfrom->GetId()); const CBlockIndex *pindex = nullptr; if (locator.IsNull()) { // If locator is null, return the hashStop block BlockMap::iterator mi = mapBlockIndex.find(hashStop); if (mi == mapBlockIndex.end()) { return true; } pindex = (*mi).second; } else { // Find the last block the caller has in the main chain pindex = FindForkInGlobalIndex(chainActive, locator); if (pindex) { pindex = chainActive.Next(pindex); } } // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx // count at the end std::vector vHeaders; int nLimit = MAX_HEADERS_RESULTS; LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? 
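// The GETBLOCKS handler above only inventories blocks a pruning node is
// likely to still have: MIN_BLOCKS_TO_KEEP minus roughly an hour's worth of
// blocks (3600 / nPowTargetSpacing). A tiny worked example of the arithmetic,
// assuming the usual values of 288 kept blocks and 600-second spacing, which
// give a 282-block window below the tip.
#include <cassert>
#include <cstdint>

int PrunedBlocksLikelyToHave(int minBlocksToKeep, int64_t powTargetSpacingSecs) {
    return minBlocksToKeep - static_cast<int>(3600 / powTargetSpacingSecs);
}

static void PrunedWindowExample() {
    // 3600 / 600 = 6 blocks per hour, so 288 - 6 = 282.
    assert(PrunedBlocksLikelyToHave(288, 600) == 282);
}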
"end" : hashStop.ToString(), pfrom->id); for (; pindex; pindex = chainActive.Next(pindex)) { vHeaders.push_back(pindex->GetBlockHeader()); if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) { break; } } // pindex can be nullptr either if we sent chainActive.Tip() OR // if our peer has chainActive.Tip() (and thus we are sending an empty // headers message). In both cases it's safe to update // pindexBestHeaderSent to be our tip. // // It is important that we simply reset the BestHeaderSent value here, // and not max(BestHeaderSent, newHeaderSent). We might have announced // the currently-being-connected tip using a compact block, which // resulted in the peer sending a headers request, which we respond to // without the new block. By resetting the BestHeaderSent, we ensure we // will re-announce the new block via headers (or compact blocks again) // in the SendMessages logic. nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip(); connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); } else if (strCommand == NetMsgType::TX) { // Stop processing the transaction early if // We are in blocks only mode and peer is either not whitelisted or // whitelistrelay is off if (!fRelayTxes && (!pfrom->fWhitelisted || !gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))) { LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->id); return true; } std::deque vWorkQueue; std::vector vEraseQueue; CTransactionRef ptx; vRecv >> ptx; const CTransaction &tx = *ptx; CInv inv(MSG_TX, tx.GetId()); pfrom->AddInventoryKnown(inv); LOCK(cs_main); bool fMissingInputs = false; CValidationState state; pfrom->setAskFor.erase(inv.hash); mapAlreadyAskedFor.erase(inv.hash); std::list lRemovedTxn; if (!AlreadyHave(inv) && AcceptToMemoryPool(config, mempool, state, ptx, true, &fMissingInputs, &lRemovedTxn)) { mempool.check(pcoinsTip); RelayTransaction(tx, connman); for (size_t i = 0; i < tx.vout.size(); i++) { vWorkQueue.emplace_back(inv.hash, i); } pfrom->nLastTXTime = GetTime(); LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s " "(poolsz %u txn, %u kB)\n", pfrom->id, tx.GetId().ToString(), mempool.size(), mempool.DynamicMemoryUsage() / 1000); // Recursively process any orphan transactions that depended on this // one std::set setMisbehaving; while (!vWorkQueue.empty()) { auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front()); vWorkQueue.pop_front(); if (itByPrev == mapOrphanTransactionsByPrev.end()) { continue; } for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { const CTransactionRef &porphanTx = (*mi)->second.tx; const CTransaction &orphanTx = *porphanTx; const uint256 &orphanId = orphanTx.GetId(); NodeId fromPeer = (*mi)->second.fromPeer; bool fMissingInputs2 = false; // Use a dummy CValidationState so someone can't setup nodes // to counter-DoS based on orphan resolution (that is, // feeding people an invalid transaction based on LegitTxX // in order to get anyone relaying LegitTxX banned) CValidationState stateDummy; if (setMisbehaving.count(fromPeer)) { continue; } if (AcceptToMemoryPool(config, mempool, stateDummy, porphanTx, true, &fMissingInputs2, &lRemovedTxn)) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanId.ToString()); RelayTransaction(orphanTx, connman); for (size_t i = 0; i < orphanTx.vout.size(); i++) { vWorkQueue.emplace_back(orphanId, i); } vEraseQueue.push_back(orphanId); } else if (!fMissingInputs2) { int nDos = 0; if (stateDummy.IsInvalid(nDos) && nDos > 
0) { // Punish peer that gave us an invalid orphan tx Misbehaving(fromPeer, nDos, "invalid-orphan-tx"); setMisbehaving.insert(fromPeer); LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanId.ToString()); } // Has inputs but not accepted to mempool // Probably non-standard or insufficient fee/priority LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanId.ToString()); vEraseQueue.push_back(orphanId); if (!stateDummy.CorruptionPossible()) { // Do not use rejection cache for witness // transactions or witness-stripped transactions, as // they can have been malleated. See // https://github.com/bitcoin/bitcoin/issues/8279 // for details. assert(recentRejects); recentRejects->insert(orphanId); } } mempool.check(pcoinsTip); } } for (uint256 hash : vEraseQueue) { EraseOrphanTx(hash); } } else if (fMissingInputs) { // It may be the case that the orphans parents have all been // rejected. bool fRejectedParents = false; for (const CTxIn &txin : tx.vin) { if (recentRejects->contains(txin.prevout.hash)) { fRejectedParents = true; break; } } if (!fRejectedParents) { uint32_t nFetchFlags = GetFetchFlags( pfrom, chainActive.Tip(), chainparams.GetConsensus()); for (const CTxIn &txin : tx.vin) { CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash); pfrom->AddInventoryKnown(_inv); if (!AlreadyHave(_inv)) { pfrom->AskFor(_inv); } } AddOrphanTx(ptx, pfrom->GetId()); // DoS prevention: do not allow mapOrphanTransactions to grow // unbounded unsigned int nMaxOrphanTx = (unsigned int)std::max( int64_t(0), gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx); if (nEvicted > 0) { LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted); } } else { LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n", tx.GetId().ToString()); // We will continue to reject this tx since it has rejected // parents so avoid re-requesting it from other peers. recentRejects->insert(tx.GetId()); } } else { if (!state.CorruptionPossible()) { // Do not use rejection cache for witness transactions or // witness-stripped transactions, as they can have been // malleated. See https://github.com/bitcoin/bitcoin/issues/8279 // for details. assert(recentRejects); recentRejects->insert(tx.GetId()); if (RecursiveDynamicUsage(*ptx) < 100000) { AddToCompactExtraTransactions(ptx); } } if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { // Always relay transactions received from whitelisted peers, // even if they were already in the mempool or rejected from it // due to policy, allowing the node to function as a gateway for // nodes hidden behind it. // // Never relay transactions that we would assign a non-zero DoS // score for, as we expect peers to do the same with us in that // case. int nDoS = 0; if (!state.IsInvalid(nDoS) || nDoS == 0) { LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetId().ToString(), pfrom->id); RelayTransaction(tx, connman); } else { LogPrintf("Not relaying invalid transaction %s from " "whitelisted peer=%d (%s)\n", tx.GetId().ToString(), pfrom->id, FormatStateMessage(state)); } } } for (const CTransactionRef &removedTx : lRemovedTxn) { AddToCompactExtraTransactions(removedTx); } int nDoS = 0; if (state.IsInvalid(nDoS)) { LogPrint( BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(), pfrom->id, FormatStateMessage(state)); // Never send AcceptToMemoryPool's internal codes over P2P. 
if (state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, uint8_t(state.GetRejectCode()), state.GetRejectReason().substr( 0, MAX_REJECT_MESSAGE_LENGTH), inv.hash)); } if (nDoS > 0) { Misbehaving(pfrom, nDoS, state.GetRejectReason()); } } } // Ignore blocks received while importing else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) { CBlockHeaderAndShortTxIDs cmpctblock; vRecv >> cmpctblock; { LOCK(cs_main); if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) { // Doesn't connect (or is genesis), instead of DoSing in // AcceptBlockHeader, request deeper headers if (!IsInitialBlockDownload()) { connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); } return true; } } const CBlockIndex *pindex = nullptr; CValidationState state; if (!ProcessNewBlockHeaders(config, {cmpctblock.header}, state, &pindex)) { int nDoS; if (state.IsInvalid(nDoS)) { if (nDoS > 0) { LOCK(cs_main); Misbehaving(pfrom, nDoS, state.GetRejectReason()); } LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->id); return true; } } // When we succeed in decoding a block's txids from a cmpctblock // message we typically jump to the BLOCKTXN handling code, with a // dummy (empty) BLOCKTXN message, to re-use the logic there in // completing processing of the putative block (without cs_main). bool fProcessBLOCKTXN = false; CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION); // If we end up treating this as a plain headers message, call that as // well // without cs_main. bool fRevertToHeaderProcessing = false; CDataStream vHeadersMsg(SER_NETWORK, PROTOCOL_VERSION); // Keep a CBlock for "optimistic" compactblock reconstructions (see // below) std::shared_ptr pblock = std::make_shared(); bool fBlockReconstructed = false; { LOCK(cs_main); // If AcceptBlockHeader returned true, it set pindex assert(pindex); UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash()); std::map::iterator>>:: iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash()); bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end(); if (pindex->nStatus & BLOCK_HAVE_DATA) { // Nothing to do here return true; } if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better pindex->nTx != 0) { // We had this block at some point, but pruned it if (fAlreadyInFlight) { // We requested this block for some reason, but our mempool // will probably be useless so we just grab the block via // normal getdata. std::vector vInv(1); vInv[0] = CInv( MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); } return true; } // If we're not close to tip yet, give up and let parallel block // fetch work its magic. if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus())) { return true; } CNodeState *nodestate = State(pfrom->GetId()); // We want to be a bit conservative just to be extra careful about // DoS possibilities in compact block processing... 
if (pindex->nHeight <= chainActive.Height() + 2) { if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || (fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) { std::list::iterator *queuedBlockIt = nullptr; if (!MarkBlockAsInFlight(config, pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex, &queuedBlockIt)) { if (!(*queuedBlockIt)->partialBlock) { (*queuedBlockIt) ->partialBlock.reset( new PartiallyDownloadedBlock(config, &mempool)); } else { // The block was already in flight using compact // blocks from the same peer. LogPrint(BCLog::NET, "Peer sent us compact block " "we were already syncing!\n"); return true; } } PartiallyDownloadedBlock &partialBlock = *(*queuedBlockIt)->partialBlock; ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status == READ_STATUS_INVALID) { // Reset in-flight state in case of whitelist MarkBlockAsReceived(pindex->GetBlockHash()); Misbehaving(pfrom, 100, "invalid-cmpctblk"); LogPrintf("Peer %d sent us invalid compact block\n", pfrom->id); return true; } else if (status == READ_STATUS_FAILED) { // Duplicate txindexes, the block is now in-flight, so // just request it. std::vector vInv(1); vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return true; } BlockTransactionsRequest req; for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) { if (!partialBlock.IsTxAvailable(i)) { req.indexes.push_back(i); } } if (req.indexes.empty()) { // Dirty hack to jump to BLOCKTXN code (TODO: move // message handling into their own functions) BlockTransactions txn; txn.blockhash = cmpctblock.header.GetHash(); blockTxnMsg << txn; fProcessBLOCKTXN = true; } else { req.blockhash = pindex->GetBlockHash(); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req)); } } else { // This block is either already in flight from a different // peer, or this peer has too many blocks outstanding to // download from. Optimistically try to reconstruct anyway // since we might be able to without any round trips. PartiallyDownloadedBlock tempBlock(config, &mempool); ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status != READ_STATUS_OK) { // TODO: don't ignore failures return true; } std::vector dummy; status = tempBlock.FillBlock(*pblock, dummy); if (status == READ_STATUS_OK) { fBlockReconstructed = true; } } } else { if (fAlreadyInFlight) { // We requested this block, but its far into the future, so // our mempool will probably be useless - request the block // normally. std::vector vInv(1); vInv[0] = CInv( MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash()); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return true; } else { // If this was an announce-cmpctblock, we want the same // treatment as a header message. 
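// After InitData fills what it can from the mempool and the extra-transaction
// ring, the code above asks the announcing peer (via getblocktxn) only for
// the slots it could not fill; an empty request means the block reconstructed
// without a round trip. A sketch of collecting those missing indexes, with a
// vector<bool> standing in for PartiallyDownloadedBlock::IsTxAvailable.
#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for BlockTransactionsRequest: just the missing positions.
struct TxnRequest {
    std::vector<uint32_t> indexes;
};

TxnRequest BuildMissingTxnRequest(const std::vector<bool> &txAvailable) {
    TxnRequest req;
    for (size_t i = 0; i < txAvailable.size(); ++i) {
        if (!txAvailable[i]) {
            req.indexes.push_back(static_cast<uint32_t>(i));
        }
    }
    return req;
}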
Dirty hack to process as // if it were just a headers message (TODO: move message // handling into their own functions) std::vector headers; headers.push_back(cmpctblock.header); vHeadersMsg << headers; fRevertToHeaderProcessing = true; } } } // cs_main if (fProcessBLOCKTXN) { return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc); } if (fRevertToHeaderProcessing) { return ProcessMessage(config, pfrom, NetMsgType::HEADERS, vHeadersMsg, nTimeReceived, chainparams, connman, interruptMsgProc); } if (fBlockReconstructed) { // If we got here, we were able to optimistically reconstruct a // block that is in flight from some other peer. { LOCK(cs_main); mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false)); } bool fNewBlock = false; ProcessNewBlock(config, pblock, true, &fNewBlock); if (fNewBlock) { pfrom->nLastBlockTime = GetTime(); } // hold cs_main for CBlockIndex::IsValid() LOCK(cs_main); if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) { // Clear download state for this block, which is in process from // some other peer. We do this after calling. ProcessNewBlock so // that a malleated cmpctblock announcement can't be used to // interfere with block relay. MarkBlockAsReceived(pblock->GetHash()); } } } else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing { BlockTransactions resp; vRecv >> resp; std::shared_ptr pblock = std::make_shared(); bool fBlockRead = false; { LOCK(cs_main); std::map::iterator>>:: iterator it = mapBlocksInFlight.find(resp.blockhash); if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock || it->second.first != pfrom->GetId()) { LogPrint(BCLog::NET, "Peer %d sent us block transactions for block " "we weren't expecting\n", pfrom->id); return true; } PartiallyDownloadedBlock &partialBlock = *it->second.second->partialBlock; ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { // Reset in-flight state in case of whitelist. MarkBlockAsReceived(resp.blockhash); Misbehaving(pfrom, 100, "invalid-cmpctblk-txns"); LogPrintf("Peer %d sent us invalid compact block/non-matching " "block transactions\n", pfrom->id); return true; } else if (status == READ_STATUS_FAILED) { // Might have collided, fall back to getdata now :( std::vector invs; invs.push_back( CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash)); connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); } else { // Block is either okay, or possibly we received // READ_STATUS_CHECKBLOCK_FAILED. // Note that CheckBlock can only fail for one of a few reasons: // 1. bad-proof-of-work (impossible here, because we've already // accepted the header) // 2. merkleroot doesn't match the transactions given (already // caught in FillBlock with READ_STATUS_FAILED, so // impossible here) // 3. the block is otherwise invalid (eg invalid coinbase, // block is too big, too many legacy sigops, etc). // So if CheckBlock failed, #3 is the only possibility. // Under BIP 152, we don't DoS-ban unless proof of work is // invalid (we don't require all the stateless checks to have // been run). This is handled below, so just treat this as // though the block was successfully read, and rely on the // handling in ProcessNewBlock to ensure the block index is // updated, reject messages go out, etc. 
// it is now an empty pointer MarkBlockAsReceived(resp.blockhash); fBlockRead = true; // mapBlockSource is only used for sending reject messages and // DoS scores, so the race between here and cs_main in // ProcessNewBlock is fine. BIP 152 permits peers to relay // compact blocks after validating the header only; we should // not punish peers if the block turns out to be invalid. mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false)); } } // Don't hold cs_main when we call into ProcessNewBlock if (fBlockRead) { bool fNewBlock = false; // Since we requested this block (it was in mapBlocksInFlight), // force it to be processed, even if it would not be a candidate for // new tip (missing previous block, chain not long enough, etc) ProcessNewBlock(config, pblock, true, &fNewBlock); if (fNewBlock) { pfrom->nLastBlockTime = GetTime(); } } } // Ignore headers received while importing else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) { std::vector headers; // Bypass the normal CBlock deserialization, as we don't want to risk // deserializing 2000 full blocks. unsigned int nCount = ReadCompactSize(vRecv); if (nCount > MAX_HEADERS_RESULTS) { LOCK(cs_main); Misbehaving(pfrom, 20, "too-many-headers"); return error("headers message size = %u", nCount); } headers.resize(nCount); for (unsigned int n = 0; n < nCount; n++) { vRecv >> headers[n]; // Ignore tx count; assume it is 0. ReadCompactSize(vRecv); } if (nCount == 0) { // Nothing interesting. Stop asking this peers for more headers. return true; } const CBlockIndex *pindexLast = nullptr; { LOCK(cs_main); CNodeState *nodestate = State(pfrom->GetId()); // If this looks like it could be a block announcement (nCount < // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers // that // don't connect: // - Send a getheaders message in response to try to connect the // chain. // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that // don't connect before giving DoS points // - Once a headers message is received that is valid and does // connect, // nUnconnectingHeaders gets reset back to 0. if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) { nodestate->nUnconnectingHeaders++; connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256())); LogPrint(BCLog::NET, "received header %s: missing prev block " "%s, sending getheaders (%d) to end " "(peer=%d, nUnconnectingHeaders=%d)\n", headers[0].GetHash().ToString(), headers[0].hashPrevBlock.ToString(), pindexBestHeader->nHeight, pfrom->id, nodestate->nUnconnectingHeaders); // Set hashLastUnknownBlock for this peer, so that if we // eventually get the headers - even from a different peer - // we can use this peer to download. UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash()); if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { // The peer is sending us many headers we can't connect. 
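// The HEADERS handler below tolerates a peer repeatedly announcing headers
// whose parent we don't know: each such message is answered with a getheaders
// and counted, and every MAX_UNCONNECTING_HEADERS-th one in a row earns
// misbehavior points; a headers message that finally connects resets the
// streak. A sketch of that counter; the default constants (10 messages,
// 20 points) are illustrative assumptions.
struct UnconnectingHeadersTracker {
    int nUnconnectingHeaders = 0;

    // Returns the misbehavior points to assign for this unconnecting message.
    int OnUnconnectingHeaders(int maxUnconnecting = 10, int penalty = 20) {
        ++nUnconnectingHeaders;
        return (nUnconnectingHeaders % maxUnconnecting == 0) ? penalty : 0;
    }

    void OnConnectingHeaders() { nUnconnectingHeaders = 0; }
};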
Misbehaving(pfrom, 20, "too-many-unconnected-headers"); } return true; } uint256 hashLastBlock; for (const CBlockHeader &header : headers) { if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { Misbehaving(pfrom, 20, "disconnected-header"); return error("non-continuous headers sequence"); } hashLastBlock = header.GetHash(); } } CValidationState state; if (!ProcessNewBlockHeaders(config, headers, state, &pindexLast)) { int nDoS; if (state.IsInvalid(nDoS)) { if (nDoS > 0) { LOCK(cs_main); Misbehaving(pfrom, nDoS, state.GetRejectReason()); } return error("invalid header received"); } } { LOCK(cs_main); CNodeState *nodestate = State(pfrom->GetId()); if (nodestate->nUnconnectingHeaders > 0) { LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->id, nodestate->nUnconnectingHeaders); } nodestate->nUnconnectingHeaders = 0; assert(pindexLast); UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash()); if (nCount == MAX_HEADERS_RESULTS) { // Headers message had its maximum size; the peer may have more // headers. // TODO: optimize: if pindexLast is an ancestor of // chainActive.Tip or pindexBestHeader, continue from there // instead. LogPrint( BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->id, pfrom->nStartingHeight); connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256())); } bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus()); // If this set of headers is valid and ends in a block with at least // as much work as our tip, download as much as possible. if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) { std::vector vToFetch; const CBlockIndex *pindexWalk = pindexLast; // Calculate all the blocks we'd need to switch to pindexLast, // up to a limit. while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) { // We don't have this block, and it's not yet in flight. vToFetch.push_back(pindexWalk); } pindexWalk = pindexWalk->pprev; } // If pindexWalk still isn't on our main chain, we're looking at // a very large reorg at a time we think we're close to caught // up to the main chain -- this shouldn't really happen. Bail // out on the direct fetch and rely on parallel download // instead. if (!chainActive.Contains(pindexWalk)) { LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); } else { std::vector vGetData; // Download as much as possible, from earliest to latest. 
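// The direct-fetch logic above walks back from the last announced header via
// pprev, collecting blocks we neither have nor have in flight (up to the
// per-peer in-transit limit), then requests them earliest-first. A
// self-contained sketch of that walk with a simplified header-index type; the
// real code additionally abandons the direct fetch if the walk never reaches
// the active chain (a large reorg).
#include <algorithm>
#include <cstddef>
#include <vector>

struct HeaderIndex {
    const HeaderIndex *pprev = nullptr;
    bool haveData = false;       // BLOCK_HAVE_DATA
    bool inFlight = false;       // already being downloaded from some peer
    bool onActiveChain = false;  // chainActive.Contains()
};

std::vector<const HeaderIndex *> CollectDirectFetch(const HeaderIndex *last,
                                                    size_t maxInTransit) {
    std::vector<const HeaderIndex *> toFetch;
    const HeaderIndex *walk = last;
    while (walk && !walk->onActiveChain && toFetch.size() <= maxInTransit) {
        if (!walk->haveData && !walk->inFlight) {
            toFetch.push_back(walk);
        }
        walk = walk->pprev;
    }
    // Request from earliest to latest so blocks can connect as they arrive.
    std::reverse(toFetch.begin(), toFetch.end());
    return toFetch;
}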
for (const CBlockIndex *pindex : boost::adaptors::reverse(vToFetch)) { if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { // Can't download any more from this peer break; } uint32_t nFetchFlags = GetFetchFlags( pfrom, pindex->pprev, chainparams.GetConsensus()); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); MarkBlockAsInFlight(config, pfrom->GetId(), pindex->GetBlockHash(), chainparams.GetConsensus(), pindex); LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", pindex->GetBlockHash().ToString(), pfrom->id); } if (vGetData.size() > 1) { LogPrint(BCLog::NET, "Downloading blocks toward %s " "(%d) via headers direct fetch\n", pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); } if (vGetData.size() > 0) { if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) { // In any case, we want to download using a compact // block, not a regular one. vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); } connman.PushMessage( pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } } } } } else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing { std::shared_ptr pblock = std::make_shared(); vRecv >> *pblock; LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->id); // Process all blocks from whitelisted peers, even if not requested, // unless we're still syncing with the network. Such an unrequested // block may still be processed, subject to the conditions in // AcceptBlock(). bool forceProcessing = pfrom->fWhitelisted && !IsInitialBlockDownload(); const uint256 hash(pblock->GetHash()); { LOCK(cs_main); // Also always process if we requested the block explicitly, as we // may need it even though it is not a candidate for a new best tip. forceProcessing |= MarkBlockAsReceived(hash); // mapBlockSource is only used for sending reject messages and DoS // scores, so the race between here and cs_main in ProcessNewBlock // is fine. mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true)); } bool fNewBlock = false; ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock); if (fNewBlock) { pfrom->nLastBlockTime = GetTime(); } } else if (strCommand == NetMsgType::GETADDR) { // This asymmetric behavior for inbound and outbound connections was // introduced to prevent a fingerprinting attack: an attacker can send // specific fake addresses to users' AddrMan and later request them by // sending getaddr messages. Making nodes which are behind NAT and can // only make outgoing connections ignore the getaddr message mitigates // the attack. if (!pfrom->fInbound) { LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->id); return true; } // Only send one GetAddr response per connection to reduce resource // waste and discourage addr stamping of INV announcements. if (pfrom->fSentAddr) { LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". 
peer=%d\n", pfrom->id); return true; } pfrom->fSentAddr = true; pfrom->vAddrToSend.clear(); std::vector vAddr = connman.GetAddresses(); FastRandomContext insecure_rand; for (const CAddress &addr : vAddr) { pfrom->PushAddress(addr, insecure_rand); } } else if (strCommand == NetMsgType::MEMPOOL) { if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted) { LogPrint(BCLog::NET, "mempool request with bloom filters disabled, " "disconnect peer=%d\n", pfrom->GetId()); pfrom->fDisconnect = true; return true; } if (connman.OutboundTargetReached(false) && !pfrom->fWhitelisted) { LogPrint(BCLog::NET, "mempool request with bandwidth limit " "reached, disconnect peer=%d\n", pfrom->GetId()); pfrom->fDisconnect = true; return true; } LOCK(pfrom->cs_inventory); pfrom->fSendMempool = true; } else if (strCommand == NetMsgType::PING) { if (pfrom->nVersion > BIP0031_VERSION) { uint64_t nonce = 0; vRecv >> nonce; // Echo the message back with the nonce. This allows for two useful // features: // // 1) A remote node can quickly check if the connection is // operational. // 2) Remote nodes can measure the latency of the network thread. If // this node is overloaded it won't respond to pings quickly and the // remote node can avoid sending us more work, like chain download // requests. // // The nonce stops the remote getting confused between different // pings: without it, if the remote node sends a ping once per // second and this node takes 5 seconds to respond to each, the 5th // ping the remote sends would appear to return very quickly. connman.PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce)); } } else if (strCommand == NetMsgType::PONG) { int64_t pingUsecEnd = nTimeReceived; uint64_t nonce = 0; size_t nAvail = vRecv.in_avail(); bool bPingFinished = false; std::string sProblem; if (nAvail >= sizeof(nonce)) { vRecv >> nonce; // Only process pong message if there is an outstanding ping (old // ping without nonce should never pong) if (pfrom->nPingNonceSent != 0) { if (nonce == pfrom->nPingNonceSent) { // Matching pong received, this ping is no longer // outstanding bPingFinished = true; int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart; if (pingUsecTime > 0) { // Successful ping time measurement, replace previous pfrom->nPingUsecTime = pingUsecTime; pfrom->nMinPingUsecTime = std::min( pfrom->nMinPingUsecTime.load(), pingUsecTime); } else { // This should never happen sProblem = "Timing mishap"; } } else { // Nonce mismatches are normal when pings are overlapping sProblem = "Nonce mismatch"; if (nonce == 0) { // This is most likely a bug in another implementation // somewhere; cancel this ping bPingFinished = true; sProblem = "Nonce zero"; } } } else { sProblem = "Unsolicited pong without ping"; } } else { // This is most likely a bug in another implementation somewhere; // cancel this ping bPingFinished = true; sProblem = "Short payload"; } if (!(sProblem.empty())) { LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", pfrom->id, sProblem, pfrom->nPingNonceSent, nonce, nAvail); } if (bPingFinished) { pfrom->nPingNonceSent = 0; } } else if (strCommand == NetMsgType::FILTERLOAD) { CBloomFilter filter; vRecv >> filter; if (!filter.IsWithinSizeConstraints()) { // There is no excuse for sending a too-large filter LOCK(cs_main); Misbehaving(pfrom, 100, "oversized-bloom-filter"); } else { LOCK(pfrom->cs_filter); delete pfrom->pfilter; pfrom->pfilter = new CBloomFilter(filter); pfrom->pfilter->UpdateEmptyFull(); pfrom->fRelayTxes = true; } } else if 
(strCommand == NetMsgType::FILTERADD) { std::vector vData; vRecv >> vData; // Nodes must NEVER send a data item > 520 bytes (the max size for a // script data object, and thus, the maximum size any matched object can // have) in a filteradd message. bool bad = false; if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { bad = true; } else { LOCK(pfrom->cs_filter); if (pfrom->pfilter) { pfrom->pfilter->insert(vData); } else { bad = true; } } if (bad) { LOCK(cs_main); // The structure of this code doesn't really allow for a good error // code. We'll go generic. Misbehaving(pfrom, 100, "invalid-filteradd"); } } else if (strCommand == NetMsgType::FILTERCLEAR) { LOCK(pfrom->cs_filter); if (pfrom->GetLocalServices() & NODE_BLOOM) { delete pfrom->pfilter; pfrom->pfilter = new CBloomFilter(); } pfrom->fRelayTxes = true; } else if (strCommand == NetMsgType::FEEFILTER) { Amount newFeeFilter(0); vRecv >> newFeeFilter; if (MoneyRange(newFeeFilter)) { { LOCK(pfrom->cs_feeFilter); pfrom->minFeeFilter = newFeeFilter; } LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->id); } } else if (strCommand == NetMsgType::NOTFOUND) { // We do not care about the NOTFOUND message, but logging an Unknown // Command message would be undesirable as we transmit it ourselves. } else { // Ignore unknown commands for extensibility LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id); } return true; } static bool SendRejectsAndCheckIfBanned(CNode *pnode, CConnman &connman) { AssertLockHeld(cs_main); CNodeState &state = *State(pnode->GetId()); for (const CBlockReject &reject : state.rejects) { connman.PushMessage( pnode, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, std::string(NetMsgType::BLOCK), reject.chRejectCode, reject.strRejectReason, reject.hashBlock)); } state.rejects.clear(); if (state.fShouldBan) { state.fShouldBan = false; if (pnode->fWhitelisted) { LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode->addr.ToString()); } else if (pnode->fAddnode) { LogPrintf("Warning: not punishing addnoded peer %s!\n", pnode->addr.ToString()); } else { pnode->fDisconnect = true; if (pnode->addr.IsLocal()) { LogPrintf("Warning: not banning local peer %s!\n", pnode->addr.ToString()); } else { connman.Ban(pnode->addr, BanReasonNodeMisbehaving); } } return true; } return false; } bool ProcessMessages(const Config &config, CNode *pfrom, CConnman &connman, const std::atomic &interruptMsgProc) { - const CChainParams &chainparams = Params(); + const CChainParams &chainparams = config.GetChainParams(); // // Message format // (4) message start // (12) command // (4) size // (4) checksum // (x) data // bool fMoreWork = false; if (!pfrom->vRecvGetData.empty()) { ProcessGetData(config, pfrom, chainparams.GetConsensus(), connman, interruptMsgProc); } if (pfrom->fDisconnect) { return false; } // this maintains the order of responses if (!pfrom->vRecvGetData.empty()) { return true; } // Don't bother if send buffer is too full to respond anyway if (pfrom->fPauseSend) { return false; } std::list msgs; { LOCK(pfrom->cs_vProcessMsg); if (pfrom->vProcessMsg.empty()) { return false; } // Just take one message msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin()); pfrom->nProcessQueueSize -= msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE; pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman.GetReceiveFloodSize(); fMoreWork = !pfrom->vProcessMsg.empty(); } CNetMessage &msg(msgs.front()); 
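// The "Message format" comment above describes a 24-byte wire header that the
// checks below validate: 4-byte network magic, 12-byte NUL-padded command,
// 4-byte little-endian payload length, and a 4-byte checksum equal to the first
// four bytes of the double-SHA256 of the payload. A stand-alone sketch of
// parsing those fields, independent of the real CMessageHeader class (the
// hashing needed to actually verify pchChecksum is left out):
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>

struct WireHeader {
    std::array<uint8_t, 4> magic;    // must match chainparams.NetMagic()
    std::string command;             // e.g. "headers", "block", "ping"
    uint32_t payloadSize = 0;        // number of payload bytes that follow
    std::array<uint8_t, 4> checksum; // first 4 bytes of dSHA256(payload)
};

// Parses the fixed 24-byte header from `buf`; returns false if it is too short.
bool ParseWireHeader(const uint8_t *buf, std::size_t len, WireHeader &out) {
    constexpr std::size_t kHeaderSize = 24;
    if (len < kHeaderSize) {
        return false;
    }
    std::memcpy(out.magic.data(), buf, 4);
    const char *cmd = reinterpret_cast<const char *>(buf + 4);
    out.command.assign(cmd, std::find(cmd, cmd + 12, '\0')); // strip NUL padding
    std::memcpy(&out.payloadSize, buf + 16, 4); // wire order is little-endian;
                                                // a little-endian host is assumed
    std::memcpy(out.checksum.data(), buf + 20, 4);
    return true;
}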
msg.SetVersion(pfrom->GetRecvVersion()); // Scan for message start if (memcmp(std::begin(msg.hdr.pchMessageStart), std::begin(chainparams.NetMagic()), CMessageHeader::MESSAGE_START_SIZE) != 0) { LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->id); pfrom->fDisconnect = true; return false; } // Read header CMessageHeader &hdr = msg.hdr; if (!hdr.IsValid(chainparams.NetMagic())) { LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->id); return fMoreWork; } std::string strCommand = hdr.GetCommand(); // Message size unsigned int nMessageSize = hdr.nMessageSize; // Checksum CDataStream &vRecv = msg.vRecv; const uint256 &hash = msg.GetMessageHash(); if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { LogPrintf( "%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__, SanitizeString(strCommand), nMessageSize, HexStr(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE), HexStr(hdr.pchChecksum, hdr.pchChecksum + CMessageHeader::CHECKSUM_SIZE)); return fMoreWork; } // Process message bool fRet = false; try { fRet = ProcessMessage(config, pfrom, strCommand, vRecv, msg.nTime, chainparams, connman, interruptMsgProc); if (interruptMsgProc) { return false; } if (!pfrom->vRecvGetData.empty()) { fMoreWork = true; } } catch (const std::ios_base::failure &e) { connman.PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, std::string("error parsing message"))); if (strstr(e.what(), "end of data")) { // Allow exceptions from under-length message on vRecv LogPrintf( "%s(%s, %u bytes): Exception '%s' caught, normally caused by a " "message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); } else if (strstr(e.what(), "size too large")) { // Allow exceptions from over-long size LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); } else if (strstr(e.what(), "non-canonical ReadCompactSize()")) { // Allow exceptions from non-canonical encoding LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what()); } else { PrintExceptionContinue(&e, "ProcessMessages()"); } } catch (const std::exception &e) { PrintExceptionContinue(&e, "ProcessMessages()"); } catch (...) { PrintExceptionContinue(nullptr, "ProcessMessages()"); } if (!fRet) { LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->id); } LOCK(cs_main); SendRejectsAndCheckIfBanned(pfrom, connman); return fMoreWork; } class CompareInvMempoolOrder { CTxMemPool *mp; public: CompareInvMempoolOrder(CTxMemPool *_mempool) { mp = _mempool; } bool operator()(std::set::iterator a, std::set::iterator b) { /* As std::make_heap produces a max-heap, we want the entries with the * fewest ancestors/highest fee to sort later. */ return mp->CompareDepthAndScore(*b, *a); } }; bool SendMessages(const Config &config, CNode *pto, CConnman &connman, const std::atomic &interruptMsgProc) { - const Consensus::Params &consensusParams = Params().GetConsensus(); + const Consensus::Params &consensusParams = + config.GetChainParams().GetConsensus(); // Don't send anything until the version handshake is complete if (!pto->fSuccessfullyConnected || pto->fDisconnect) { return true; } // If we get here, the outgoing message serialization version is set and // can't change. 
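// CompareInvMempoolOrder above is deliberately inverted: std::make_heap builds a
// max-heap, so the element that compares greatest sits at the front and is popped
// first, and the comparator is flipped so that the most desirable transaction ends
// up on top. The benefit (used further down when relaying inventory) is that only
// the handful of entries actually sent need ordering, not the whole set. A generic
// top-K selection with the same make_heap/pop_heap pattern:
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

std::vector<int> TopK(std::vector<int> scores, std::size_t k) {
    std::vector<int> best;
    std::make_heap(scores.begin(), scores.end()); // max-heap in O(n)
    while (!scores.empty() && best.size() < k) {
        std::pop_heap(scores.begin(), scores.end()); // largest moves to the back
        best.push_back(scores.back());
        scores.pop_back();
    }
    return best; // the k largest values, in decreasing order
}

int main() {
    for (int s : TopK({5, 1, 9, 3, 7, 2}, 3)) {
        std::cout << s << ' '; // prints "9 7 5"
    }
    std::cout << '\n';
    return 0;
}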
const CNetMsgMaker msgMaker(pto->GetSendVersion()); // // Message: ping // bool pingSend = false; if (pto->fPingQueued) { // RPC ping request by user pingSend = true; } if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) { // Ping automatically sent as a latency probe & keepalive. pingSend = true; } if (pingSend) { uint64_t nonce = 0; while (nonce == 0) { GetRandBytes((uint8_t *)&nonce, sizeof(nonce)); } pto->fPingQueued = false; pto->nPingUsecStart = GetTimeMicros(); if (pto->nVersion > BIP0031_VERSION) { pto->nPingNonceSent = nonce; connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce)); } else { // Peer is too old to support ping command with nonce, pong will // never arrive. pto->nPingNonceSent = 0; connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING)); } } // Acquire cs_main for IsInitialBlockDownload() and CNodeState() TRY_LOCK(cs_main, lockMain); if (!lockMain) { return true; } if (SendRejectsAndCheckIfBanned(pto, connman)) { return true; } CNodeState &state = *State(pto->GetId()); // Address refresh broadcast int64_t nNow = GetTimeMicros(); if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) { AdvertiseLocal(pto); pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } // // Message: addr // if (pto->nNextAddrSend < nNow) { pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL); std::vector vAddr; vAddr.reserve(pto->vAddrToSend.size()); for (const CAddress &addr : pto->vAddrToSend) { if (!pto->addrKnown.contains(addr.GetKey())) { pto->addrKnown.insert(addr.GetKey()); vAddr.push_back(addr); // receiver rejects addr messages larger than 1000 if (vAddr.size() >= 1000) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); vAddr.clear(); } } } pto->vAddrToSend.clear(); if (!vAddr.empty()) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); } // we only send the big addr message once if (pto->vAddrToSend.capacity() > 40) { pto->vAddrToSend.shrink_to_fit(); } } // Start block sync if (pindexBestHeader == nullptr) { pindexBestHeader = chainActive.Tip(); } // Download if this is a nice peer, or we have no nice peers and this one // might do. bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're close // to today. if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { state.fSyncStarted = true; nSyncStarted++; const CBlockIndex *pindexStart = pindexBestHeader; /** * If possible, start at the block preceding the currently best * known header. This ensures that we always get a non-empty list of * headers back as long as the peer is up-to-date. With a non-empty * response, we can initialise the peer's known best block. This * wouldn't be possible if we requested starting at pindexBestHeader * and got back an empty response. 
*/ if (pindexStart->pprev) { pindexStart = pindexStart->pprev; } LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->id, pto->nStartingHeight); connman.PushMessage( pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256())); } } // Resend wallet transactions that haven't gotten in a block yet // Except during reindex, importing and IBD, when old wallet transactions // become unconfirmed and spams other nodes. if (!fReindex && !fImporting && !IsInitialBlockDownload()) { GetMainSignals().Broadcast(nTimeBestReceived, &connman); } // // Try sending block announcements via headers // { // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block // hashes we're relaying, and our peer wants headers announcements, then // find the first header not yet known to our peer but would connect, // and send. If no header would connect, or if we have too many blocks, // or if the peer doesn't want headers, just add all to the inv queue. LOCK(pto->cs_inventory); std::vector vHeaders; bool fRevertToInv = ((!state.fPreferHeaders && (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE); // last header queued for delivery const CBlockIndex *pBestIndex = nullptr; // ensure pindexBestKnownBlock is up-to-date ProcessBlockAvailability(pto->id); if (!fRevertToInv) { bool fFoundStartingHeader = false; // Try to find first header that our peer doesn't have, and then // send all headers past that one. If we come across an headers that // aren't on chainActive, give up. for (const uint256 &hash : pto->vBlockHashesToAnnounce) { BlockMap::iterator mi = mapBlockIndex.find(hash); assert(mi != mapBlockIndex.end()); const CBlockIndex *pindex = mi->second; if (chainActive[pindex->nHeight] != pindex) { // Bail out if we reorged away from this block fRevertToInv = true; break; } if (pBestIndex != nullptr && pindex->pprev != pBestIndex) { // This means that the list of blocks to announce don't // connect to each other. This shouldn't really be possible // to hit during regular operation (because reorgs should // take us to a chain that has some block not on the prior // chain, which should be caught by the prior check), but // one way this could happen is by using invalidateblock / // reconsiderblock repeatedly on the tip, causing it to be // added multiple times to vBlockHashesToAnnounce. Robustly // deal with this rare situation by reverting to an inv. fRevertToInv = true; break; } pBestIndex = pindex; if (fFoundStartingHeader) { // add this to the headers message vHeaders.push_back(pindex->GetBlockHeader()); } else if (PeerHasHeader(&state, pindex)) { // Keep looking for the first new block. continue; } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) { // Peer doesn't have this header but they do have the prior // one. // Start sending headers. fFoundStartingHeader = true; vHeaders.push_back(pindex->GetBlockHeader()); } else { // Peer doesn't have this header or the prior one -- // nothing will connect, so bail out. fRevertToInv = true; break; } } } if (!fRevertToInv && !vHeaders.empty()) { if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) { // We only send up to 1 block as header-and-ids, as otherwise // probably means we're doing an initial-ish-sync or they're // slow. 
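// The getheaders requests above pass chainActive.GetLocator(...), a block
// locator: a short list of block hashes sampled densely near the tip and then
// exponentially sparser back toward genesis, so the remote peer can find the
// last block we have in common even across a reorg. A simplified sketch of that
// sampling scheme, working on plain heights instead of real CBlockIndex entries
// (the exact step schedule of the real GetLocator may differ slightly):
#include <algorithm>
#include <vector>

std::vector<int> LocatorHeights(int tipHeight) {
    std::vector<int> heights;
    int step = 1;
    for (int h = tipHeight; h > 0; h = std::max(h - step, 0)) {
        heights.push_back(h);
        if (heights.size() >= 10) {
            step *= 2; // dense for the first ~10 entries, then back off
        }
    }
    heights.push_back(0); // always finish at the genesis block
    return heights;
}
// For tipHeight = 1000 this yields 1000, 999, ..., 991, 989, 985, 977, ..., 481, 0.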
LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__, vHeaders.front().GetHash().ToString(), pto->id); int nSendFlags = 0; bool fGotBlockFromCache = false; { LOCK(cs_most_recent_block); if (most_recent_block_hash == pBestIndex->GetBlockHash()) { CBlockHeaderAndShortTxIDs cmpctblock( *most_recent_block); connman.PushMessage( pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); fGotBlockFromCache = true; } } if (!fGotBlockFromCache) { CBlock block; bool ret = ReadBlockFromDisk(block, pBestIndex, config); assert(ret); CBlockHeaderAndShortTxIDs cmpctblock(block); connman.PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } state.pindexBestHeaderSent = pBestIndex; } else if (state.fPreferHeaders) { if (vHeaders.size() > 1) { LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, vHeaders.size(), vHeaders.front().GetHash().ToString(), vHeaders.back().GetHash().ToString(), pto->id); } else { LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__, vHeaders.front().GetHash().ToString(), pto->id); } connman.PushMessage( pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); state.pindexBestHeaderSent = pBestIndex; } else { fRevertToInv = true; } } if (fRevertToInv) { // If falling back to using an inv, just try to inv the tip. The // last entry in vBlockHashesToAnnounce was our tip at some point in // the past. if (!pto->vBlockHashesToAnnounce.empty()) { const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back(); BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce); assert(mi != mapBlockIndex.end()); const CBlockIndex *pindex = mi->second; // Warn if we're announcing a block that is not on the main // chain. This should be very rare and could be optimized out. // Just log for now. if (chainActive[pindex->nHeight] != pindex) { LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n", hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString()); } // If the peer's chain has this block, don't inv it back. if (!PeerHasHeader(&state, pindex)) { pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce)); LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__, pto->id, hashToAnnounce.ToString()); } } } pto->vBlockHashesToAnnounce.clear(); } // // Message: inventory // std::vector vInv; { LOCK(pto->cs_inventory); vInv.reserve(std::max(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX)); // Add blocks for (const uint256 &hash : pto->vInventoryBlockToSend) { vInv.push_back(CInv(MSG_BLOCK, hash)); if (vInv.size() == MAX_INV_SZ) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } } pto->vInventoryBlockToSend.clear(); // Check whether periodic sends should happen bool fSendTrickle = pto->fWhitelisted; if (pto->nNextInvSend < nNow) { fSendTrickle = true; // Use half the delay for outbound peers, as there is less privacy // concern for them. pto->nNextInvSend = PoissonNextSend( nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound); } // Time to send but the peer has requested we not relay transactions. 
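// PoissonNextSend (defined in the networking code, not shown in this diff)
// schedules the next addr/inv/feefilter broadcast by drawing the delay from an
// exponential distribution, so the sends form a Poisson process whose timing is
// hard for an observer to use for fingerprinting or transaction-origin inference.
// The `INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound` above halves the mean
// interval for outbound peers. A rough stand-in for that scheduling, assuming the
// interval is in seconds and timestamps are in microseconds:
#include <cstdint>
#include <random>

int64_t NextSendMicros(int64_t nowMicros, int averageIntervalSeconds) {
    static std::mt19937_64 rng{std::random_device{}()};
    // Exponential inter-arrival times with mean averageIntervalSeconds.
    std::exponential_distribution<double> delay(1.0 / averageIntervalSeconds);
    return nowMicros + static_cast<int64_t>(delay(rng) * 1000000.0);
}
// Usage mirroring the trickle above (half the mean delay for outbound peers):
//   nextInvSend = NextSendMicros(now, INVENTORY_BROADCAST_INTERVAL >> !fInbound);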
if (fSendTrickle) { LOCK(pto->cs_filter); if (!pto->fRelayTxes) { pto->setInventoryTxToSend.clear(); } } // Respond to BIP35 mempool requests if (fSendTrickle && pto->fSendMempool) { auto vtxinfo = mempool.infoAll(); pto->fSendMempool = false; Amount filterrate(0); { LOCK(pto->cs_feeFilter); filterrate = pto->minFeeFilter; } LOCK(pto->cs_filter); for (const auto &txinfo : vtxinfo) { const uint256 &txid = txinfo.tx->GetId(); CInv inv(MSG_TX, txid); pto->setInventoryTxToSend.erase(txid); if (filterrate != Amount(0)) { if (txinfo.feeRate.GetFeePerK() < filterrate) { continue; } } if (pto->pfilter) { if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) { continue; } } pto->filterInventoryKnown.insert(txid); vInv.push_back(inv); if (vInv.size() == MAX_INV_SZ) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } } pto->timeLastMempoolReq = GetTime(); } // Determine transactions to relay if (fSendTrickle) { // Produce a vector with all candidates for sending std::vector::iterator> vInvTx; vInvTx.reserve(pto->setInventoryTxToSend.size()); for (std::set::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) { vInvTx.push_back(it); } Amount filterrate(0); { LOCK(pto->cs_feeFilter); filterrate = pto->minFeeFilter; } // Topologically and fee-rate sort the inventory we send for privacy // and priority reasons. A heap is used so that not all items need // sorting if only a few are being sent. CompareInvMempoolOrder compareInvMempoolOrder(&mempool); std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); // No reason to drain out at many times the network's capacity, // especially since we have many peers and some will draw much // shorter delays. unsigned int nRelayedTransactions = 0; LOCK(pto->cs_filter); while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) { // Fetch the top element from the heap std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); std::set::iterator it = vInvTx.back(); vInvTx.pop_back(); uint256 hash = *it; // Remove it from the to-be-sent set pto->setInventoryTxToSend.erase(it); // Check if not in the filter already if (pto->filterInventoryKnown.contains(hash)) { continue; } // Not in the mempool anymore? don't bother sending it. auto txinfo = mempool.info(hash); if (!txinfo.tx) { continue; } if (filterrate != Amount(0) && txinfo.feeRate.GetFeePerK() < filterrate) { continue; } if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) { continue; } // Send vInv.push_back(CInv(MSG_TX, hash)); nRelayedTransactions++; { // Expire old relay messages while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow) { mapRelay.erase(vRelayExpiration.front().second); vRelayExpiration.pop_front(); } auto ret = mapRelay.insert( std::make_pair(hash, std::move(txinfo.tx))); if (ret.second) { vRelayExpiration.push_back(std::make_pair( nNow + 15 * 60 * 1000000, ret.first)); } } if (vInv.size() == MAX_INV_SZ) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } pto->filterInventoryKnown.insert(hash); } } } if (!vInv.empty()) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); } // Detect whether we're stalling nNow = GetTimeMicros(); if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) { // Stalling only triggers when the block download window cannot move. 
// During normal steady state, the download window should be much larger // than the to-be-downloaded set of blocks, so disconnection should only // happen during initial block download. LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->id); pto->fDisconnect = true; return true; } // In case there is a block that has been in flight from this peer for 2 + // 0.5 * N times the block interval (with N the number of peers from which // we're downloading validated blocks), disconnect due to timeout. We // compensate for other peers to prevent killing off peers due to our own // downstream link being saturated. We only count validated in-flight blocks // so peers can't advertise non-existing block hashes to unreasonably // increase our timeout. if (state.vBlocksInFlight.size() > 0) { QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0); if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { LogPrintf("Timeout downloading block %s from peer=%d, " "disconnecting\n", queuedBlock.hash.ToString(), pto->id); pto->fDisconnect = true; return true; } } // // Message: getdata (blocks) // std::vector vGetData; if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector vToDownload; NodeId staller = -1; FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams); for (const CBlockIndex *pindex : vToDownload) { uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams); vGetData.push_back( CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); MarkBlockAsInFlight(config, pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->id); } if (state.nBlocksInFlight == 0 && staller != -1) { if (State(staller)->nStallingSince == 0) { State(staller)->nStallingSince = nNow; LogPrint(BCLog::NET, "Stall started peer=%d\n", staller); } } } // // Message: getdata (non-blocks) // while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow) { const CInv &inv = (*pto->mapAskFor.begin()).second; if (!AlreadyHave(inv)) { LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->id); vGetData.push_back(inv); if (vGetData.size() >= 1000) { connman.PushMessage( pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); vGetData.clear(); } } else { // If we're not going to ask, don't expect a response. 
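// The disconnect rule above gives a peer 2 + 0.5 * N block intervals to deliver a
// requested block, where N counts the other peers we are currently downloading
// validated blocks from; with the 10-minute mainnet spacing that is 20 minutes with
// no competing downloads and 35 minutes with three. A small worked version of that
// arithmetic, in seconds, following the comment above rather than the actual
// BLOCK_DOWNLOAD_TIMEOUT_* constants (which are defined outside this diff):
#include <cstdint>
#include <iostream>

int64_t DownloadTimeoutSeconds(int64_t powTargetSpacing, int otherValidatedPeers) {
    // 2 + 0.5 * N block intervals, as described above.
    return powTargetSpacing * 2 + (powTargetSpacing * otherValidatedPeers) / 2;
}

int main() {
    std::cout << DownloadTimeoutSeconds(600, 0) << "\n"; // 1200 s (20 minutes)
    std::cout << DownloadTimeoutSeconds(600, 3) << "\n"; // 2100 s (35 minutes)
    return 0;
}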
pto->setAskFor.erase(inv.hash); } pto->mapAskFor.erase(pto->mapAskFor.begin()); } if (!vGetData.empty()) { connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } // // Message: feefilter // // We don't want white listed peers to filter txs to us if we have // -whitelistforcerelay if (pto->nVersion >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) && !(pto->fWhitelisted && gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) { Amount currentFilter = mempool .GetMinFee( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFeePerK(); int64_t timeNow = GetTimeMicros(); if (timeNow > pto->nextSendTimeFeeFilter) { static CFeeRate default_feerate = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE); static FeeFilterRounder filterRounder(default_feerate); Amount filterToSend = filterRounder.round(currentFilter); // If we don't allow free transactions, then we always have a fee // filter of at least minRelayTxFee if (gArgs.GetArg("-limitfreerelay", DEFAULT_LIMITFREERELAY) <= 0) { filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK()); } if (filterToSend != pto->lastSentFeeFilter) { connman.PushMessage( pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend)); pto->lastSentFeeFilter = filterToSend; } pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL); } // If the fee filter has changed substantially and it's still more than // MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then move the // broadcast to within MAX_FEEFILTER_CHANGE_DELAY. else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter && (currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) { pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000; } } return true; } class CNetProcessingCleanup { public: CNetProcessingCleanup() {} ~CNetProcessingCleanup() { // orphan transactions mapOrphanTransactions.clear(); mapOrphanTransactionsByPrev.clear(); } } instance_of_cnetprocessingcleanup; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index f47c98a15..07e2d6ad1 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1,1762 +1,1764 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include "rpc/blockchain.h" #include "amount.h" #include "chain.h" #include "chainparams.h" #include "checkpoints.h" #include "coins.h" #include "config.h" #include "consensus/validation.h" #include "hash.h" #include "policy/policy.h" #include "primitives/transaction.h" #include "rpc/server.h" #include "rpc/tojson.h" #include "streams.h" #include "sync.h" #include "txmempool.h" #include "util.h" #include "utilstrencodings.h" #include "validation.h" #include // boost::thread::interrupt #include #include #include struct CUpdatedBlock { uint256 hash; int height; }; static std::mutex cs_blockchange; static std::condition_variable cond_blockchange; static CUpdatedBlock latestblock; static double GetDifficultyFromBits(uint32_t nBits) { int nShift = (nBits >> 24) & 0xff; double dDiff = 0x0000ffff / double(nBits & 0x00ffffff); while (nShift < 29) { dDiff *= 256.0; nShift++; } while (nShift > 29) { dDiff /= 256.0; nShift--; } return dDiff; } double GetDifficulty(const CBlockIndex *blockindex) { // Floating point number that is a multiple of the minimum difficulty, // minimum difficulty = 1.0. if (blockindex == nullptr) { return 1.0; } return GetDifficultyFromBits(blockindex->nBits); } UniValue blockheaderToJSON(const CBlockIndex *blockindex) { UniValue result(UniValue::VOBJ); result.push_back(Pair("hash", blockindex->GetBlockHash().GetHex())); int confirmations = -1; // Only report confirmations if the block is on the main chain if (chainActive.Contains(blockindex)) { confirmations = chainActive.Height() - blockindex->nHeight + 1; } result.push_back(Pair("confirmations", confirmations)); result.push_back(Pair("height", blockindex->nHeight)); result.push_back(Pair("version", blockindex->nVersion)); result.push_back( Pair("versionHex", strprintf("%08x", blockindex->nVersion))); result.push_back(Pair("merkleroot", blockindex->hashMerkleRoot.GetHex())); result.push_back(Pair("time", int64_t(blockindex->nTime))); result.push_back( Pair("mediantime", int64_t(blockindex->GetMedianTimePast()))); result.push_back(Pair("nonce", uint64_t(blockindex->nNonce))); result.push_back(Pair("bits", strprintf("%08x", blockindex->nBits))); result.push_back(Pair("difficulty", GetDifficulty(blockindex))); result.push_back(Pair("chainwork", blockindex->nChainWork.GetHex())); if (blockindex->pprev) { result.push_back(Pair("previousblockhash", blockindex->pprev->GetBlockHash().GetHex())); } CBlockIndex *pnext = chainActive.Next(blockindex); if (pnext) { result.push_back(Pair("nextblockhash", pnext->GetBlockHash().GetHex())); } return result; } UniValue blockToJSON(const Config &config, const CBlock &block, const CBlockIndex *blockindex, bool txDetails) { UniValue result(UniValue::VOBJ); result.push_back(Pair("hash", blockindex->GetBlockHash().GetHex())); int confirmations = -1; // Only report confirmations if the block is on the main chain if (chainActive.Contains(blockindex)) { confirmations = chainActive.Height() - blockindex->nHeight + 1; } result.push_back(Pair("confirmations", confirmations)); result.push_back(Pair( "size", (int)::GetSerializeSize(block, SER_NETWORK, PROTOCOL_VERSION))); result.push_back(Pair("height", blockindex->nHeight)); result.push_back(Pair("version", block.nVersion)); result.push_back(Pair("versionHex", strprintf("%08x", block.nVersion))); result.push_back(Pair("merkleroot", block.hashMerkleRoot.GetHex())); UniValue txs(UniValue::VARR); for (const auto &tx : block.vtx) { if (txDetails) { UniValue objTx(UniValue::VOBJ); TxToJSON(config, *tx, uint256(), objTx); txs.push_back(objTx); } else { 
txs.push_back(tx->GetId().GetHex()); } } result.push_back(Pair("tx", txs)); result.push_back(Pair("time", block.GetBlockTime())); result.push_back( Pair("mediantime", int64_t(blockindex->GetMedianTimePast()))); result.push_back(Pair("nonce", uint64_t(block.nNonce))); result.push_back(Pair("bits", strprintf("%08x", block.nBits))); result.push_back(Pair("difficulty", GetDifficulty(blockindex))); result.push_back(Pair("chainwork", blockindex->nChainWork.GetHex())); if (blockindex->pprev) { result.push_back(Pair("previousblockhash", blockindex->pprev->GetBlockHash().GetHex())); } CBlockIndex *pnext = chainActive.Next(blockindex); if (pnext) { result.push_back(Pair("nextblockhash", pnext->GetBlockHash().GetHex())); } return result; } UniValue getblockcount(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getblockcount\n" "\nReturns the number of blocks in the longest blockchain.\n" "\nResult:\n" "n (numeric) The current block count\n" "\nExamples:\n" + HelpExampleCli("getblockcount", "") + HelpExampleRpc("getblockcount", "")); } LOCK(cs_main); return chainActive.Height(); } UniValue getbestblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getbestblockhash\n" "\nReturns the hash of the best (tip) block in the " "longest blockchain.\n" "\nResult:\n" "\"hex\" (string) the block hash hex encoded\n" "\nExamples:\n" + HelpExampleCli("getbestblockhash", "") + HelpExampleRpc("getbestblockhash", "")); } LOCK(cs_main); return chainActive.Tip()->GetBlockHash().GetHex(); } void RPCNotifyBlockChange(bool ibd, const CBlockIndex *pindex) { if (pindex) { std::lock_guard lock(cs_blockchange); latestblock.hash = pindex->GetBlockHash(); latestblock.height = pindex->nHeight; } cond_blockchange.notify_all(); } UniValue waitfornewblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "waitfornewblock (timeout)\n" "\nWaits for a specific new block and returns " "useful info about it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. timeout (int, optional, default=0) Time in " "milliseconds to wait for a response. 
0 indicates " "no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitfornewblock", "1000") + HelpExampleRpc("waitfornewblock", "1000")); } int timeout = 0; if (request.params.size() > 0) { timeout = request.params[0].get_int(); } CUpdatedBlock block; { std::unique_lock lock(cs_blockchange); block = latestblock; if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&block] { return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&block] { return latestblock.height != block.height || latestblock.hash != block.hash || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.push_back(Pair("hash", block.hash.GetHex())); ret.push_back(Pair("height", block.height)); return ret; } UniValue waitforblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "waitforblock (timeout)\n" "\nWaits for a specific new block and returns useful info about " "it.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. \"blockhash\" (required, string) Block hash to wait for.\n" "2. timeout (int, optional, default=0) Time in milliseconds " "to wait for a response. 0 indicates no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4" "570b24c9ed7b4a8c619eb02596f8862\", " "1000") + HelpExampleRpc("waitforblock", "\"0000000000079f8ef3d2c688c244eb7a4" "570b24c9ed7b4a8c619eb02596f8862\", " "1000")); } int timeout = 0; uint256 hash = uint256S(request.params[0].get_str()); if (request.params.size() > 1) { timeout = request.params[1].get_int(); } CUpdatedBlock block; { std::unique_lock lock(cs_blockchange); if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&hash] { return latestblock.hash == hash || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&hash] { return latestblock.hash == hash || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.push_back(Pair("hash", block.hash.GetHex())); ret.push_back(Pair("height", block.height)); return ret; } UniValue waitforblockheight(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "waitforblockheight (timeout)\n" "\nWaits for (at least) block height and returns the height and " "hash\n" "of the current tip.\n" "\nReturns the current block on timeout or exit.\n" "\nArguments:\n" "1. height (required, int) Block height to wait for (int)\n" "2. timeout (int, optional, default=0) Time in milliseconds to " "wait for a response. 
0 indicates no timeout.\n" "\nResult:\n" "{ (json object)\n" " \"hash\" : { (string) The blockhash\n" " \"height\" : { (int) Block height\n" "}\n" "\nExamples:\n" + HelpExampleCli("waitforblockheight", "\"100\", 1000") + HelpExampleRpc("waitforblockheight", "\"100\", 1000")); } int timeout = 0; int height = request.params[0].get_int(); if (request.params.size() > 1) { timeout = request.params[1].get_int(); } CUpdatedBlock block; { std::unique_lock lock(cs_blockchange); if (timeout) { cond_blockchange.wait_for( lock, std::chrono::milliseconds(timeout), [&height] { return latestblock.height >= height || !IsRPCRunning(); }); } else { cond_blockchange.wait(lock, [&height] { return latestblock.height >= height || !IsRPCRunning(); }); } block = latestblock; } UniValue ret(UniValue::VOBJ); ret.push_back(Pair("hash", block.hash.GetHex())); ret.push_back(Pair("height", block.height)); return ret; } UniValue getdifficulty(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error("getdifficulty\n" "\nReturns the proof-of-work difficulty as a " "multiple of the minimum difficulty.\n" "\nResult:\n" "n.nnn (numeric) the proof-of-work " "difficulty as a multiple of the minimum " "difficulty.\n" "\nExamples:\n" + HelpExampleCli("getdifficulty", "") + HelpExampleRpc("getdifficulty", "")); } LOCK(cs_main); return GetDifficulty(chainActive.Tip()); } std::string EntryDescriptionString() { return " \"size\" : n, (numeric) transaction size.\n" " \"fee\" : n, (numeric) transaction fee in " + CURRENCY_UNIT + "\n" " \"modifiedfee\" : n, (numeric) transaction fee with fee " "deltas used for mining priority\n" " \"time\" : n, (numeric) local time transaction " "entered pool in seconds since 1 Jan 1970 GMT\n" " \"height\" : n, (numeric) block height when " "transaction entered pool\n" " \"startingpriority\" : n, (numeric) DEPRECATED. Priority when " "transaction entered pool\n" " \"currentpriority\" : n, (numeric) DEPRECATED. Transaction " "priority now\n" " \"descendantcount\" : n, (numeric) number of in-mempool " "descendant transactions (including this one)\n" " \"descendantsize\" : n, (numeric) virtual transaction size " "of in-mempool descendants (including this one)\n" " \"descendantfees\" : n, (numeric) modified fees (see above) " "of in-mempool descendants (including this one)\n" " \"ancestorcount\" : n, (numeric) number of in-mempool " "ancestor transactions (including this one)\n" " \"ancestorsize\" : n, (numeric) virtual transaction size " "of in-mempool ancestors (including this one)\n" " \"ancestorfees\" : n, (numeric) modified fees (see above) " "of in-mempool ancestors (including this one)\n" " \"depends\" : [ (array) unconfirmed transactions " "used as inputs for this transaction\n" " \"transactionid\", (string) parent transaction id\n" " ... 
]\n"; } void entryToJSON(UniValue &info, const CTxMemPoolEntry &e) { AssertLockHeld(mempool.cs); info.push_back(Pair("size", (int)e.GetTxSize())); info.push_back(Pair("fee", ValueFromAmount(e.GetFee()))); info.push_back(Pair("modifiedfee", ValueFromAmount(e.GetModifiedFee()))); info.push_back(Pair("time", e.GetTime())); info.push_back(Pair("height", (int)e.GetHeight())); info.push_back(Pair("startingpriority", e.GetPriority(e.GetHeight()))); info.push_back( Pair("currentpriority", e.GetPriority(chainActive.Height()))); info.push_back(Pair("descendantcount", e.GetCountWithDescendants())); info.push_back(Pair("descendantsize", e.GetSizeWithDescendants())); info.push_back( Pair("descendantfees", e.GetModFeesWithDescendants().GetSatoshis())); info.push_back(Pair("ancestorcount", e.GetCountWithAncestors())); info.push_back(Pair("ancestorsize", e.GetSizeWithAncestors())); info.push_back( Pair("ancestorfees", e.GetModFeesWithAncestors().GetSatoshis())); const CTransaction &tx = e.GetTx(); std::set setDepends; for (const CTxIn &txin : tx.vin) { if (mempool.exists(txin.prevout.hash)) { setDepends.insert(txin.prevout.hash.ToString()); } } UniValue depends(UniValue::VARR); for (const std::string &dep : setDepends) { depends.push_back(dep); } info.push_back(Pair("depends", depends)); } UniValue mempoolToJSON(bool fVerbose = false) { if (fVerbose) { LOCK(mempool.cs); UniValue o(UniValue::VOBJ); for (const CTxMemPoolEntry &e : mempool.mapTx) { const uint256 &txid = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); entryToJSON(info, e); o.push_back(Pair(txid.ToString(), info)); } return o; } else { std::vector vtxids; mempool.queryHashes(vtxids); UniValue a(UniValue::VARR); for (const uint256 &txid : vtxids) { a.push_back(txid.ToString()); } return a; } } UniValue getrawmempool(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getrawmempool ( verbose )\n" "\nReturns all transaction ids in memory pool as a json array of " "string transaction ids.\n" "\nArguments:\n" "1. verbose (boolean, optional, default=false) True for a json " "object, false for array of transaction ids\n" "\nResult: (for verbose = false):\n" "[ (json array of string)\n" " \"transactionid\" (string) The transaction id\n" " ,...\n" "]\n" "\nResult: (for verbose = true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getrawmempool", "true") + HelpExampleRpc("getrawmempool", "true")); } bool fVerbose = false; if (request.params.size() > 0) { fVerbose = request.params[0].get_bool(); } return mempoolToJSON(fVerbose); } UniValue getmempoolancestors(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getmempoolancestors txid (verbose)\n" "\nIf txid is in the mempool, returns all in-mempool ancestors.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id " "(must be in mempool)\n" "2. 
verbose (boolean, optional, default=false) " "True for a json object, false for array of transaction ids\n" "\nResult (for verbose=false):\n" "[ (json array of strings)\n" " \"transactionid\" (string) The transaction id of an " "in-mempool ancestor transaction\n" " ,...\n" "]\n" "\nResult (for verbose=true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolancestors", "\"mytxid\"") + HelpExampleRpc("getmempoolancestors", "\"mytxid\"")); } bool fVerbose = false; if (request.params.size() > 1) { fVerbose = request.params[1].get_bool(); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(mempool.cs); CTxMemPool::txiter it = mempool.mapTx.find(hash); if (it == mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } CTxMemPool::setEntries setAncestors; uint64_t noLimit = std::numeric_limits::max(); std::string dummy; mempool.CalculateMemPoolAncestors(*it, setAncestors, noLimit, noLimit, noLimit, noLimit, dummy, false); if (!fVerbose) { UniValue o(UniValue::VARR); for (CTxMemPool::txiter ancestorIt : setAncestors) { o.push_back(ancestorIt->GetTx().GetId().ToString()); } return o; } else { UniValue o(UniValue::VOBJ); for (CTxMemPool::txiter ancestorIt : setAncestors) { const CTxMemPoolEntry &e = *ancestorIt; const uint256 &_hash = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); entryToJSON(info, e); o.push_back(Pair(_hash.ToString(), info)); } return o; } } UniValue getmempooldescendants(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getmempooldescendants txid (verbose)\n" "\nIf txid is in the mempool, returns all in-mempool descendants.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id " "(must be in mempool)\n" "2. 
verbose (boolean, optional, default=false) " "True for a json object, false for array of transaction ids\n" "\nResult (for verbose=false):\n" "[ (json array of strings)\n" " \"transactionid\" (string) The transaction id of an " "in-mempool descendant transaction\n" " ,...\n" "]\n" "\nResult (for verbose=true):\n" "{ (json object)\n" " \"transactionid\" : { (json object)\n" + EntryDescriptionString() + " }, ...\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempooldescendants", "\"mytxid\"") + HelpExampleRpc("getmempooldescendants", "\"mytxid\"")); } bool fVerbose = false; if (request.params.size() > 1) fVerbose = request.params[1].get_bool(); uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(mempool.cs); CTxMemPool::txiter it = mempool.mapTx.find(hash); if (it == mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } CTxMemPool::setEntries setDescendants; mempool.CalculateDescendants(it, setDescendants); // CTxMemPool::CalculateDescendants will include the given tx setDescendants.erase(it); if (!fVerbose) { UniValue o(UniValue::VARR); for (CTxMemPool::txiter descendantIt : setDescendants) { o.push_back(descendantIt->GetTx().GetId().ToString()); } return o; } else { UniValue o(UniValue::VOBJ); for (CTxMemPool::txiter descendantIt : setDescendants) { const CTxMemPoolEntry &e = *descendantIt; const uint256 &_hash = e.GetTx().GetId(); UniValue info(UniValue::VOBJ); entryToJSON(info, e); o.push_back(Pair(_hash.ToString(), info)); } return o; } } UniValue getmempoolentry(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "getmempoolentry txid\n" "\nReturns mempool data for given transaction\n" "\nArguments:\n" "1. \"txid\" (string, required) " "The transaction id (must be in mempool)\n" "\nResult:\n" "{ (json object)\n" + EntryDescriptionString() + "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolentry", "\"mytxid\"") + HelpExampleRpc("getmempoolentry", "\"mytxid\"")); } uint256 hash = ParseHashV(request.params[0], "parameter 1"); LOCK(mempool.cs); CTxMemPool::txiter it = mempool.mapTx.find(hash); if (it == mempool.mapTx.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not in mempool"); } const CTxMemPoolEntry &e = *it; UniValue info(UniValue::VOBJ); entryToJSON(info, e); return info; } UniValue getblockhash(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "getblockhash height\n" "\nReturns hash of block in best-block-chain at height provided.\n" "\nArguments:\n" "1. 
height (numeric, required) The height index\n" "\nResult:\n" "\"hash\" (string) The block hash\n" "\nExamples:\n" + HelpExampleCli("getblockhash", "1000") + HelpExampleRpc("getblockhash", "1000")); } LOCK(cs_main); int nHeight = request.params[0].get_int(); if (nHeight < 0 || nHeight > chainActive.Height()) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Block height out of range"); } CBlockIndex *pblockindex = chainActive[nHeight]; return pblockindex->GetBlockHash().GetHex(); } UniValue getblockheader(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getblockheader \"hash\" ( verbose )\n" "\nIf verbose is false, returns a string that is serialized, " "hex-encoded data for blockheader 'hash'.\n" "If verbose is true, returns an Object with information about " "blockheader .\n" "\nArguments:\n" "1. \"hash\" (string, required) The block hash\n" "2. verbose (boolean, optional, default=true) true for a " "json object, false for the hex encoded data\n" "\nResult (for verbose = true):\n" "{\n" " \"hash\" : \"hash\", (string) the block hash (same as " "provided)\n" " \"confirmations\" : n, (numeric) The number of confirmations, " "or -1 if the block is not on the main chain\n" " \"height\" : n, (numeric) The block height or index\n" " \"version\" : n, (numeric) The block version\n" " \"versionHex\" : \"00000000\", (string) The block version " "formatted in hexadecimal\n" " \"merkleroot\" : \"xxxx\", (string) The merkle root\n" " \"time\" : ttt, (numeric) The block time in seconds " "since epoch (Jan 1 1970 GMT)\n" " \"mediantime\" : ttt, (numeric) The median block time in " "seconds since epoch (Jan 1 1970 GMT)\n" " \"nonce\" : n, (numeric) The nonce\n" " \"bits\" : \"1d00ffff\", (string) The bits\n" " \"difficulty\" : x.xxx, (numeric) The difficulty\n" " \"chainwork\" : \"0000...1f3\" (string) Expected number of " "hashes required to produce the current chain (in hex)\n" " \"previousblockhash\" : \"hash\", (string) The hash of the " "previous block\n" " \"nextblockhash\" : \"hash\", (string) The hash of the " "next block\n" "}\n" "\nResult (for verbose=false):\n" "\"data\" (string) A string that is serialized, " "hex-encoded data for block 'hash'.\n" "\nExamples:\n" + HelpExampleCli("getblockheader", "\"00000000c937983704a73af28acdec3" "7b049d214adbda81d7e2a3dd146f6ed09" "\"") + HelpExampleRpc("getblockheader", "\"00000000c937983704a73af28acdec3" "7b049d214adbda81d7e2a3dd146f6ed09" "\"")); } LOCK(cs_main); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); bool fVerbose = true; if (request.params.size() > 1) { fVerbose = request.params[1].get_bool(); } if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; if (!fVerbose) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION); ssBlock << pblockindex->GetBlockHeader(); std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()); return strHex; } return blockheaderToJSON(pblockindex); } UniValue getblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "getblock \"blockhash\" ( verbose )\n" "\nIf verbose is false, returns a string that is serialized, " "hex-encoded data for block 'hash'.\n" "If verbose is true, returns an Object with information about " "block .\n" "\nArguments:\n" "1. 
\"blockhash\" (string, required) The block hash\n" "2. verbose (boolean, optional, default=true) true " "for a json object, false for the hex encoded data\n" "\nResult (for verbose = true):\n" "{\n" " \"hash\" : \"hash\", (string) the block hash (same as " "provided)\n" " \"confirmations\" : n, (numeric) The number of confirmations, " "or -1 if the block is not on the main chain\n" " \"size\" : n, (numeric) The block size\n" " \"height\" : n, (numeric) The block height or index\n" " \"version\" : n, (numeric) The block version\n" " \"versionHex\" : \"00000000\", (string) The block version " "formatted in hexadecimal\n" " \"merkleroot\" : \"xxxx\", (string) The merkle root\n" " \"tx\" : [ (array of string) The transaction ids\n" " \"transactionid\" (string) The transaction id\n" " ,...\n" " ],\n" " \"time\" : ttt, (numeric) The block time in seconds " "since epoch (Jan 1 1970 GMT)\n" " \"mediantime\" : ttt, (numeric) The median block time in " "seconds since epoch (Jan 1 1970 GMT)\n" " \"nonce\" : n, (numeric) The nonce\n" " \"bits\" : \"1d00ffff\", (string) The bits\n" " \"difficulty\" : x.xxx, (numeric) The difficulty\n" " \"chainwork\" : \"xxxx\", (string) Expected number of hashes " "required to produce the chain up to this block (in hex)\n" " \"previousblockhash\" : \"hash\", (string) The hash of the " "previous block\n" " \"nextblockhash\" : \"hash\" (string) The hash of the " "next block\n" "}\n" "\nResult (for verbose=false):\n" "\"data\" (string) A string that is serialized, " "hex-encoded data for block 'hash'.\n" "\nExamples:\n" + HelpExampleCli("getblock", "\"00000000c937983704a73af28acdec37b049d" "214adbda81d7e2a3dd146f6ed09\"") + HelpExampleRpc("getblock", "\"00000000c937983704a73af28acdec37b049d" "214adbda81d7e2a3dd146f6ed09\"")); } LOCK(cs_main); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); bool fVerbose = true; if (request.params.size() > 1) { fVerbose = request.params[1].get_bool(); } if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlock block; CBlockIndex *pblockindex = mapBlockIndex[hash]; if (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0) { throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)"); } if (!ReadBlockFromDisk(block, pblockindex, config)) { // Block not found on disk. This could be because we have the block // header in our index but don't have the block (for example if a // non-whitelisted node sends us an unrequested long chain of valid // blocks, we add the headers to our index, but don't accept the block). 
throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk"); } if (!fVerbose) { CDataStream ssBlock(SER_NETWORK, PROTOCOL_VERSION | RPCSerializationFlags()); ssBlock << block; std::string strHex = HexStr(ssBlock.begin(), ssBlock.end()); return strHex; } return blockToJSON(config, block, pblockindex); } struct CCoinsStats { int nHeight; uint256 hashBlock; uint64_t nTransactions; uint64_t nTransactionOutputs; uint64_t nBogoSize; uint256 hashSerialized; uint64_t nDiskSize; Amount nTotalAmount; CCoinsStats() : nHeight(0), nTransactions(0), nTransactionOutputs(0), nBogoSize(0), nDiskSize(0), nTotalAmount(0) {} }; static void ApplyStats(CCoinsStats &stats, CHashWriter &ss, const uint256 &hash, const std::map &outputs) { assert(!outputs.empty()); ss << hash; ss << VARINT(outputs.begin()->second.GetHeight() * 2 + outputs.begin()->second.IsCoinBase()); stats.nTransactions++; for (const auto output : outputs) { ss << VARINT(output.first + 1); ss << output.second.GetTxOut().scriptPubKey; ss << VARINT(output.second.GetTxOut().nValue.GetSatoshis()); stats.nTransactionOutputs++; stats.nTotalAmount += output.second.GetTxOut().nValue; stats.nBogoSize += 32 /* txid */ + 4 /* vout index */ + 4 /* height + coinbase */ + 8 /* amount */ + 2 /* scriptPubKey len */ + output.second.GetTxOut().scriptPubKey.size() /* scriptPubKey */; } ss << VARINT(0); } //! Calculate statistics about the unspent transaction output set static bool GetUTXOStats(CCoinsView *view, CCoinsStats &stats) { std::unique_ptr pcursor(view->Cursor()); CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION); stats.hashBlock = pcursor->GetBestBlock(); { LOCK(cs_main); stats.nHeight = mapBlockIndex.find(stats.hashBlock)->second->nHeight; } ss << stats.hashBlock; uint256 prevkey; std::map outputs; while (pcursor->Valid()) { boost::this_thread::interruption_point(); COutPoint key; Coin coin; if (pcursor->GetKey(key) && pcursor->GetValue(coin)) { if (!outputs.empty() && key.hash != prevkey) { ApplyStats(stats, ss, prevkey, outputs); outputs.clear(); } prevkey = key.hash; outputs[key.n] = std::move(coin); } else { return error("%s: unable to read value", __func__); } pcursor->Next(); } if (!outputs.empty()) { ApplyStats(stats, ss, prevkey, outputs); } stats.hashSerialized = ss.GetHash(); stats.nDiskSize = view->EstimateSize(); return true; } UniValue pruneblockchain(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "pruneblockchain\n" "\nArguments:\n" "1. \"height\" (numeric, required) The block height to prune " "up to. May be set to a discrete height, or a unix timestamp\n" " to prune blocks whose block time is at least 2 " "hours older than the provided timestamp.\n" "\nResult:\n" "n (numeric) Height of the last block pruned.\n" "\nExamples:\n" + HelpExampleCli("pruneblockchain", "1000") + HelpExampleRpc("pruneblockchain", "1000")); } if (!fPruneMode) { throw JSONRPCError( RPC_MISC_ERROR, "Cannot prune blocks because node is not in prune mode."); } LOCK(cs_main); int heightParam = request.params[0].get_int(); if (heightParam < 0) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Negative block height."); } // Height value more than a billion is too high to be a block height, and // too low to be a block time (corresponds to timestamp from Sep 2001). 
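// pruneblockchain accepts either a block height or a UNIX timestamp and tells them
// apart with the 1,000,000,000 cutoff used just below: block heights are nowhere
// near 10^9, while 10^9 seconds after the epoch already falls in September 2001,
// far earlier than any block time that matters here. A quick check of that cutoff:
#include <ctime>
#include <iostream>

int main() {
    std::time_t cutoff = 1000000000; // the threshold used below
    char buf[32];
    std::strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S UTC", std::gmtime(&cutoff));
    std::cout << buf << "\n"; // prints "2001-09-09 01:46:40 UTC"
    return 0;
}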
if (heightParam > 1000000000) { // Add a 2 hour buffer to include blocks which might have had old // timestamps CBlockIndex *pindex = chainActive.FindEarliestAtLeast(heightParam - TIMESTAMP_WINDOW); if (!pindex) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Could not find block with at least the specified timestamp."); } heightParam = pindex->nHeight; } unsigned int height = (unsigned int)heightParam; unsigned int chainHeight = (unsigned int)chainActive.Height(); - if (chainHeight < Params().PruneAfterHeight()) { + if (chainHeight < config.GetChainParams().PruneAfterHeight()) { throw JSONRPCError(RPC_MISC_ERROR, "Blockchain is too short for pruning."); } else if (height > chainHeight) { throw JSONRPCError( RPC_INVALID_PARAMETER, "Blockchain is shorter than the attempted prune height."); } else if (height > chainHeight - MIN_BLOCKS_TO_KEEP) { LogPrint(BCLog::RPC, "Attempt to prune blocks close to the tip. " "Retaining the minimum number of blocks."); height = chainHeight - MIN_BLOCKS_TO_KEEP; } PruneBlockFilesManual(height); return uint64_t(height); } UniValue gettxoutsetinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "gettxoutsetinfo\n" "\nReturns statistics about the unspent transaction output set.\n" "Note this call may take some time.\n" "\nResult:\n" "{\n" " \"height\":n, (numeric) The current block height (index)\n" " \"bestblock\": \"hex\", (string) the best block hash hex\n" " \"transactions\": n, (numeric) The number of transactions\n" " \"txouts\": n, (numeric) The number of output " "transactions\n" " \"bogosize\": n, (numeric) A database-independent " "metric for UTXO set size\n" " \"hash_serialized\": \"hash\", (string) The serialized hash\n" " \"disk_size\": n, (numeric) The estimated size of the " "chainstate on disk\n" " \"total_amount\": x.xxx (numeric) The total amount\n" "}\n" "\nExamples:\n" + HelpExampleCli("gettxoutsetinfo", "") + HelpExampleRpc("gettxoutsetinfo", "")); } UniValue ret(UniValue::VOBJ); CCoinsStats stats; FlushStateToDisk(); if (GetUTXOStats(pcoinsTip, stats)) { ret.push_back(Pair("height", int64_t(stats.nHeight))); ret.push_back(Pair("bestblock", stats.hashBlock.GetHex())); ret.push_back(Pair("transactions", int64_t(stats.nTransactions))); ret.push_back(Pair("txouts", int64_t(stats.nTransactionOutputs))); ret.push_back(Pair("bogosize", int64_t(stats.nBogoSize))); ret.push_back(Pair("hash_serialized", stats.hashSerialized.GetHex())); ret.push_back(Pair("disk_size", stats.nDiskSize)); ret.push_back( Pair("total_amount", ValueFromAmount(stats.nTotalAmount))); } else { throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set"); } return ret; } UniValue gettxout(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { throw std::runtime_error( "gettxout \"txid\" n ( include_mempool )\n" "\nReturns details about an unspent transaction output.\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id\n" "2. n (numeric, required) vout number\n" "3. 
include_mempool (boolean, optional) Whether to include the " "mempool\n" "\nResult:\n" "{\n" " \"bestblock\" : \"hash\", (string) the block hash\n" " \"confirmations\" : n, (numeric) The number of " "confirmations\n" " \"value\" : x.xxx, (numeric) The transaction value " "in " + CURRENCY_UNIT + "\n" " \"scriptPubKey\" : { (json object)\n" " \"asm\" : \"code\", (string) \n" " \"hex\" : \"hex\", (string) \n" " \"reqSigs\" : n, (numeric) Number of required " "signatures\n" " \"type\" : \"pubkeyhash\", (string) The type, eg pubkeyhash\n" " \"addresses\" : [ (array of string) array of " "bitcoin addresses\n" " \"address\" (string) bitcoin address\n" " ,...\n" " ]\n" " },\n" " \"coinbase\" : true|false (boolean) Coinbase or not\n" "}\n" "\nExamples:\n" "\nGet unspent transactions\n" + HelpExampleCli("listunspent", "") + "\nView the details\n" + HelpExampleCli("gettxout", "\"txid\" 1") + "\nAs a json rpc call\n" + HelpExampleRpc("gettxout", "\"txid\", 1")); } LOCK(cs_main); UniValue ret(UniValue::VOBJ); std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); int n = request.params[1].get_int(); COutPoint out(hash, n); bool fMempool = true; if (request.params.size() > 2) { fMempool = request.params[2].get_bool(); } Coin coin; if (fMempool) { LOCK(mempool.cs); CCoinsViewMemPool view(pcoinsTip, mempool); if (!view.GetCoin(out, coin) || mempool.isSpent(out)) { // TODO: this should be done by the CCoinsViewMemPool return NullUniValue; } } else { if (!pcoinsTip->GetCoin(out, coin)) { return NullUniValue; } } BlockMap::iterator it = mapBlockIndex.find(pcoinsTip->GetBestBlock()); CBlockIndex *pindex = it->second; ret.push_back(Pair("bestblock", pindex->GetBlockHash().GetHex())); if (coin.GetHeight() == MEMPOOL_HEIGHT) { ret.push_back(Pair("confirmations", 0)); } else { ret.push_back(Pair("confirmations", int64_t(pindex->nHeight - coin.GetHeight() + 1))); } ret.push_back(Pair("value", ValueFromAmount(coin.GetTxOut().nValue))); UniValue o(UniValue::VOBJ); ScriptPubKeyToJSON(config, coin.GetTxOut().scriptPubKey, o, true); ret.push_back(Pair("scriptPubKey", o)); ret.push_back(Pair("coinbase", coin.IsCoinBase())); return ret; } UniValue verifychain(const Config &config, const JSONRPCRequest &request) { int nCheckLevel = gArgs.GetArg("-checklevel", DEFAULT_CHECKLEVEL); int nCheckDepth = gArgs.GetArg("-checkblocks", DEFAULT_CHECKBLOCKS); if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "verifychain ( checklevel nblocks )\n" "\nVerifies blockchain database.\n" "\nArguments:\n" "1. checklevel (numeric, optional, 0-4, default=" + strprintf("%d", nCheckLevel) + ") How thorough the block verification is.\n" "2. 
nblocks (numeric, optional, default=" + strprintf("%d", nCheckDepth) + ", 0=all) The number of blocks to check.\n" "\nResult:\n" "true|false (boolean) Verified or not\n" "\nExamples:\n" + HelpExampleCli("verifychain", "") + HelpExampleRpc("verifychain", "")); } LOCK(cs_main); if (request.params.size() > 0) { nCheckLevel = request.params[0].get_int(); } if (request.params.size() > 1) { nCheckDepth = request.params[1].get_int(); } return CVerifyDB().VerifyDB(config, pcoinsTip, nCheckLevel, nCheckDepth); } /** Implementation of IsSuperMajority with better feedback */ static UniValue SoftForkMajorityDesc(int version, CBlockIndex *pindex, const Consensus::Params &consensusParams) { UniValue rv(UniValue::VOBJ); bool activated = false; switch (version) { case 2: activated = pindex->nHeight >= consensusParams.BIP34Height; break; case 3: activated = pindex->nHeight >= consensusParams.BIP66Height; break; case 4: activated = pindex->nHeight >= consensusParams.BIP65Height; break; } rv.push_back(Pair("status", activated)); return rv; } static UniValue SoftForkDesc(const std::string &name, int version, CBlockIndex *pindex, const Consensus::Params &consensusParams) { UniValue rv(UniValue::VOBJ); rv.push_back(Pair("id", name)); rv.push_back(Pair("version", version)); rv.push_back( Pair("reject", SoftForkMajorityDesc(version, pindex, consensusParams))); return rv; } static UniValue BIP9SoftForkDesc(const Consensus::Params &consensusParams, Consensus::DeploymentPos id) { UniValue rv(UniValue::VOBJ); const ThresholdState thresholdState = VersionBitsTipState(consensusParams, id); switch (thresholdState) { case THRESHOLD_DEFINED: rv.push_back(Pair("status", "defined")); break; case THRESHOLD_STARTED: rv.push_back(Pair("status", "started")); break; case THRESHOLD_LOCKED_IN: rv.push_back(Pair("status", "locked_in")); break; case THRESHOLD_ACTIVE: rv.push_back(Pair("status", "active")); break; case THRESHOLD_FAILED: rv.push_back(Pair("status", "failed")); break; } if (THRESHOLD_STARTED == thresholdState) { rv.push_back(Pair("bit", consensusParams.vDeployments[id].bit)); } rv.push_back( Pair("startTime", consensusParams.vDeployments[id].nStartTime)); rv.push_back(Pair("timeout", consensusParams.vDeployments[id].nTimeout)); rv.push_back( Pair("since", VersionBitsTipStateSinceHeight(consensusParams, id))); return rv; } void BIP9SoftForkDescPushBack(UniValue &bip9_softforks, const std::string &name, const Consensus::Params &consensusParams, Consensus::DeploymentPos id) { // Deployments with timeout value of 0 are hidden. // A timeout value of 0 guarantees a softfork will never be activated. // This is used when softfork codes are merged without specifying the // deployment schedule. 
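    // As a consequence, hidden deployments are simply omitted from the
    // "bip9_softforks" object that getblockchaininfo returns.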
if (consensusParams.vDeployments[id].nTimeout > 0) { bip9_softforks.push_back( Pair(name, BIP9SoftForkDesc(consensusParams, id))); } } UniValue getblockchaininfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getblockchaininfo\n" "Returns an object containing various state info regarding " "blockchain processing.\n" "\nResult:\n" "{\n" " \"chain\": \"xxxx\", (string) current network name as " "defined in BIP70 (main, test, regtest)\n" " \"blocks\": xxxxxx, (numeric) the current number of " "blocks processed in the server\n" " \"headers\": xxxxxx, (numeric) the current number of " "headers we have validated\n" " \"bestblockhash\": \"...\", (string) the hash of the currently " "best block\n" " \"difficulty\": xxxxxx, (numeric) the current difficulty\n" " \"mediantime\": xxxxxx, (numeric) median time for the " "current best block\n" " \"verificationprogress\": xxxx, (numeric) estimate of " "verification progress [0..1]\n" " \"chainwork\": \"xxxx\" (string) total amount of work in " "active chain, in hexadecimal\n" " \"pruned\": xx, (boolean) if the blocks are subject " "to pruning\n" " \"pruneheight\": xxxxxx, (numeric) lowest-height complete " "block stored\n" " \"softforks\": [ (array) status of softforks in " "progress\n" " {\n" " \"id\": \"xxxx\", (string) name of softfork\n" " \"version\": xx, (numeric) block version\n" " \"reject\": { (object) progress toward " "rejecting pre-softfork blocks\n" " \"status\": xx, (boolean) true if threshold " "reached\n" " },\n" " }, ...\n" " ],\n" " \"bip9_softforks\": { (object) status of BIP9 " "softforks in progress\n" " \"xxxx\" : { (string) name of the softfork\n" " \"status\": \"xxxx\", (string) one of \"defined\", " "\"started\", \"locked_in\", \"active\", \"failed\"\n" " \"bit\": xx, (numeric) the bit (0-28) in the " "block version field used to signal this softfork (only for " "\"started\" status)\n" " \"startTime\": xx, (numeric) the minimum median " "time past of a block at which the bit gains its meaning\n" " \"timeout\": xx, (numeric) the median time past " "of a block at which the deployment is considered failed if not " "yet locked in\n" " \"since\": xx (numeric) height of the first " "block to which the status applies\n" " }\n" " }\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblockchaininfo", "") + HelpExampleRpc("getblockchaininfo", "")); } LOCK(cs_main); UniValue obj(UniValue::VOBJ); - obj.push_back(Pair("chain", Params().NetworkIDString())); + obj.push_back(Pair("chain", config.GetChainParams().NetworkIDString())); obj.push_back(Pair("blocks", int(chainActive.Height()))); obj.push_back( Pair("headers", pindexBestHeader ? 
pindexBestHeader->nHeight : -1)); obj.push_back( Pair("bestblockhash", chainActive.Tip()->GetBlockHash().GetHex())); obj.push_back(Pair("difficulty", double(GetDifficulty(chainActive.Tip())))); obj.push_back( Pair("mediantime", int64_t(chainActive.Tip()->GetMedianTimePast()))); obj.push_back( Pair("verificationprogress", - GuessVerificationProgress(Params().TxData(), chainActive.Tip()))); + GuessVerificationProgress(config.GetChainParams().TxData(), + chainActive.Tip()))); obj.push_back(Pair("chainwork", chainActive.Tip()->nChainWork.GetHex())); obj.push_back(Pair("pruned", fPruneMode)); - const Consensus::Params &consensusParams = Params().GetConsensus(); + const Consensus::Params &consensusParams = + config.GetChainParams().GetConsensus(); CBlockIndex *tip = chainActive.Tip(); UniValue softforks(UniValue::VARR); UniValue bip9_softforks(UniValue::VOBJ); softforks.push_back(SoftForkDesc("bip34", 2, tip, consensusParams)); softforks.push_back(SoftForkDesc("bip66", 3, tip, consensusParams)); softforks.push_back(SoftForkDesc("bip65", 4, tip, consensusParams)); BIP9SoftForkDescPushBack(bip9_softforks, "csv", consensusParams, Consensus::DEPLOYMENT_CSV); obj.push_back(Pair("softforks", softforks)); obj.push_back(Pair("bip9_softforks", bip9_softforks)); if (fPruneMode) { CBlockIndex *block = chainActive.Tip(); while (block && block->pprev && (block->pprev->nStatus & BLOCK_HAVE_DATA)) { block = block->pprev; } obj.push_back(Pair("pruneheight", block->nHeight)); } return obj; } /** Comparison function for sorting the getchaintips heads. */ struct CompareBlocksByHeight { bool operator()(const CBlockIndex *a, const CBlockIndex *b) const { // Make sure that unequal blocks with the same height do not compare // equal. Use the pointers themselves to make a distinction. if (a->nHeight != b->nHeight) { return (a->nHeight > b->nHeight); } return a < b; } }; UniValue getchaintips(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getchaintips\n" "Return information about all known tips in the block tree," " including the main chain as well as orphaned branches.\n" "\nResult:\n" "[\n" " {\n" " \"height\": xxxx, (numeric) height of the chain tip\n" " \"hash\": \"xxxx\", (string) block hash of the tip\n" " \"branchlen\": 0 (numeric) zero for main chain\n" " \"status\": \"active\" (string) \"active\" for the main " "chain\n" " },\n" " {\n" " \"height\": xxxx,\n" " \"hash\": \"xxxx\",\n" " \"branchlen\": 1 (numeric) length of branch " "connecting the tip to the main chain\n" " \"status\": \"xxxx\" (string) status of the chain " "(active, valid-fork, valid-headers, headers-only, invalid)\n" " }\n" "]\n" "Possible values for status:\n" "1. \"invalid\" This branch contains at least one " "invalid block\n" "2. \"headers-only\" Not all blocks for this branch are " "available, but the headers are valid\n" "3. \"valid-headers\" All blocks are available for this " "branch, but they were never fully validated\n" "4. \"valid-fork\" This branch is not part of the " "active chain, but is fully validated\n" "5. \"active\" This is the tip of the active main " "chain, which is certainly valid\n" "\nExamples:\n" + HelpExampleCli("getchaintips", "") + HelpExampleRpc("getchaintips", "")); } LOCK(cs_main); /** * Idea: the set of chain tips is chainActive.tip, plus orphan blocks which * do not have another orphan building off of them. 
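 * (Here an "orphan" is simply a block index entry that is not contained in
 * the currently active chain.)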
* Algorithm: * - Make one pass through mapBlockIndex, picking out the orphan blocks, * and also storing a set of the orphan block's pprev pointers. * - Iterate through the orphan blocks. If the block isn't pointed to by * another orphan, it is a chain tip. * - add chainActive.Tip() */ std::set setTips; std::set setOrphans; std::set setPrevs; for (const std::pair &item : mapBlockIndex) { if (!chainActive.Contains(item.second)) { setOrphans.insert(item.second); setPrevs.insert(item.second->pprev); } } for (std::set::iterator it = setOrphans.begin(); it != setOrphans.end(); ++it) { if (setPrevs.erase(*it) == 0) { setTips.insert(*it); } } // Always report the currently active tip. setTips.insert(chainActive.Tip()); /* Construct the output array. */ UniValue res(UniValue::VARR); for (const CBlockIndex *block : setTips) { UniValue obj(UniValue::VOBJ); obj.push_back(Pair("height", block->nHeight)); obj.push_back(Pair("hash", block->phashBlock->GetHex())); const int branchLen = block->nHeight - chainActive.FindFork(block)->nHeight; obj.push_back(Pair("branchlen", branchLen)); std::string status; if (chainActive.Contains(block)) { // This block is part of the currently active chain. status = "active"; } else if (block->nStatus & BLOCK_FAILED_MASK) { // This block or one of its ancestors is invalid. status = "invalid"; } else if (block->nChainTx == 0) { // This block cannot be connected because full block data for it or // one of its parents is missing. status = "headers-only"; } else if (block->IsValid(BLOCK_VALID_SCRIPTS)) { // This block is fully validated, but no longer part of the active // chain. It was probably the active block once, but was // reorganized. status = "valid-fork"; } else if (block->IsValid(BLOCK_VALID_TREE)) { // The headers for this block are valid, but it has not been // validated. It was probably never part of the most-work chain. status = "valid-headers"; } else { // No clue. 
status = "unknown"; } obj.push_back(Pair("status", status)); res.push_back(obj); } return res; } UniValue mempoolInfoToJSON() { UniValue ret(UniValue::VOBJ); ret.push_back(Pair("size", (int64_t)mempool.size())); ret.push_back(Pair("bytes", (int64_t)mempool.GetTotalTxSize())); ret.push_back(Pair("usage", (int64_t)mempool.DynamicMemoryUsage())); size_t maxmempool = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; ret.push_back(Pair("maxmempool", (int64_t)maxmempool)); ret.push_back( Pair("mempoolminfee", ValueFromAmount(mempool.GetMinFee(maxmempool).GetFeePerK()))); return ret; } UniValue getmempoolinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmempoolinfo\n" "\nReturns details on the active state of the TX memory pool.\n" "\nResult:\n" "{\n" " \"size\": xxxxx, (numeric) Current tx count\n" " \"bytes\": xxxxx, (numeric) Transaction size.\n" " \"usage\": xxxxx, (numeric) Total memory usage for " "the mempool\n" " \"maxmempool\": xxxxx, (numeric) Maximum memory usage " "for the mempool\n" " \"mempoolminfee\": xxxxx (numeric) Minimum fee for tx to " "be accepted\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmempoolinfo", "") + HelpExampleRpc("getmempoolinfo", "")); } return mempoolInfoToJSON(); } UniValue preciousblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "preciousblock \"blockhash\"\n" "\nTreats a block as if it were received before others with the " "same work.\n" "\nA later preciousblock call can override the effect of an " "earlier one.\n" "\nThe effects of preciousblock are not retained across restarts.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of the block to " "mark as precious\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("preciousblock", "\"blockhash\"") + HelpExampleRpc("preciousblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); CBlockIndex *pblockindex; { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } pblockindex = mapBlockIndex[hash]; } CValidationState state; PreciousBlock(config, state, pblockindex); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } UniValue invalidateblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "invalidateblock \"blockhash\"\n" "\nPermanently marks a block as invalid, as if it " "violated a consensus rule.\n" "\nArguments:\n" "1. 
\"blockhash\" (string, required) the hash of " "the block to mark as invalid\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("invalidateblock", "\"blockhash\"") + HelpExampleRpc("invalidateblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); CValidationState state; { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; InvalidateBlock(config, state, pblockindex); } if (state.IsValid()) { ActivateBestChain(config, state); } if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } UniValue reconsiderblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "reconsiderblock \"blockhash\"\n" "\nRemoves invalidity status of a block and its descendants, " "reconsider them for activation.\n" "This can be used to undo the effects of invalidateblock.\n" "\nArguments:\n" "1. \"blockhash\" (string, required) the hash of the block to " "reconsider\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("reconsiderblock", "\"blockhash\"") + HelpExampleRpc("reconsiderblock", "\"blockhash\"")); } std::string strHash = request.params[0].get_str(); uint256 hash(uint256S(strHash)); { LOCK(cs_main); if (mapBlockIndex.count(hash) == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } CBlockIndex *pblockindex = mapBlockIndex[hash]; ResetBlockFailureFlags(pblockindex); } CValidationState state; ActivateBestChain(config, state); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.GetRejectReason()); } return NullUniValue; } UniValue getchaintxstats(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "getchaintxstats ( nblocks blockhash )\n" "\nCompute statistics about the total number and rate of " "transactions in the chain.\n" "\nArguments:\n" "1. nblocks (numeric, optional) Size of the window in number " "of blocks (default: one month).\n" "2. \"blockhash\" (string, optional) The hash of the block that " "ends the window.\n" "\nResult:\n" "{\n" " \"time\": xxxxx, (numeric) The timestamp for the " "final block in the window in UNIX format.\n" " \"txcount\": xxxxx, (numeric) The total number of " "transactions in the chain up to that point.\n" " \"window_block_count\": xxxxx, (numeric) Size of the window in " "number of blocks.\n" " \"window_tx_count\": xxxxx, (numeric) The number of " "transactions in the window. Only returned if " "\"window_block_count\" is > 0.\n" " \"window_interval\": xxxxx, (numeric) The elapsed time in " "the window in seconds. Only returned if \"window_block_count\" is " "> 0.\n" " \"txrate\": x.xx, (numeric) The average rate of " "transactions per second in the window. 
Only returned if " "\"window_interval\" is > 0.\n" "}\n" "\nExamples:\n" + HelpExampleCli("getchaintxstats", "") + HelpExampleRpc("getchaintxstats", "2016")); } const CBlockIndex *pindex; // By default: 1 month - int blockcount = - 30 * 24 * 60 * 60 / Params().GetConsensus().nPowTargetSpacing; + int blockcount = 30 * 24 * 60 * 60 / + config.GetChainParams().GetConsensus().nPowTargetSpacing; bool havehash = !request.params[1].isNull(); uint256 hash; if (havehash) { hash = uint256S(request.params[1].get_str()); } { LOCK(cs_main); if (havehash) { auto it = mapBlockIndex.find(hash); if (it == mapBlockIndex.end()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } pindex = it->second; if (!chainActive.Contains(pindex)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Block is not in main chain"); } } else { pindex = chainActive.Tip(); } } assert(pindex != nullptr); if (request.params[0].isNull()) { blockcount = std::max(0, std::min(blockcount, pindex->nHeight - 1)); } else { blockcount = request.params[0].get_int(); if (blockcount < 0 || (blockcount > 0 && blockcount >= pindex->nHeight)) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid block count: " "should be between 0 and " "the block's height - 1"); } } const CBlockIndex *pindexPast = pindex->GetAncestor(pindex->nHeight - blockcount); int nTimeDiff = pindex->GetMedianTimePast() - pindexPast->GetMedianTimePast(); int nTxDiff = pindex->nChainTx - pindexPast->nChainTx; UniValue ret(UniValue::VOBJ); ret.push_back(Pair("time", (int64_t)pindex->nTime)); ret.push_back(Pair("txcount", (int64_t)pindex->nChainTx)); ret.push_back(Pair("window_block_count", blockcount)); if (blockcount > 0) { ret.push_back(Pair("window_tx_count", nTxDiff)); ret.push_back(Pair("window_interval", nTimeDiff)); if (nTimeDiff > 0) { ret.push_back(Pair("txrate", ((double)nTxDiff) / nTimeDiff)); } } return ret; } // clang-format off static const CRPCCommand commands[] = { // category name actor (function) okSafe argNames // ------------------- ------------------------ ---------------------- ------ ---------- { "blockchain", "getblockchaininfo", getblockchaininfo, true, {} }, { "blockchain", "getchaintxstats", &getchaintxstats, true, {"nblocks", "blockhash"} }, { "blockchain", "getbestblockhash", getbestblockhash, true, {} }, { "blockchain", "getblockcount", getblockcount, true, {} }, { "blockchain", "getblock", getblock, true, {"blockhash","verbose"} }, { "blockchain", "getblockhash", getblockhash, true, {"height"} }, { "blockchain", "getblockheader", getblockheader, true, {"blockhash","verbose"} }, { "blockchain", "getchaintips", getchaintips, true, {} }, { "blockchain", "getdifficulty", getdifficulty, true, {} }, { "blockchain", "getmempoolancestors", getmempoolancestors, true, {"txid","verbose"} }, { "blockchain", "getmempooldescendants", getmempooldescendants, true, {"txid","verbose"} }, { "blockchain", "getmempoolentry", getmempoolentry, true, {"txid"} }, { "blockchain", "getmempoolinfo", getmempoolinfo, true, {} }, { "blockchain", "getrawmempool", getrawmempool, true, {"verbose"} }, { "blockchain", "gettxout", gettxout, true, {"txid","n","include_mempool"} }, { "blockchain", "gettxoutsetinfo", gettxoutsetinfo, true, {} }, { "blockchain", "pruneblockchain", pruneblockchain, true, {"height"} }, { "blockchain", "verifychain", verifychain, true, {"checklevel","nblocks"} }, { "blockchain", "preciousblock", preciousblock, true, {"blockhash"} }, /* Not shown in help */ { "hidden", "invalidateblock", invalidateblock, true, {"blockhash"} }, { "hidden", 
"reconsiderblock", reconsiderblock, true, {"blockhash"} }, { "hidden", "waitfornewblock", waitfornewblock, true, {"timeout"} }, { "hidden", "waitforblock", waitforblock, true, {"blockhash","timeout"} }, { "hidden", "waitforblockheight", waitforblockheight, true, {"height","timeout"} }, }; // clang-format on void RegisterBlockchainRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) { t.appendCommand(commands[vcidx].name, &commands[vcidx]); } } diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index daa7c5ca0..746a948ec 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -1,1062 +1,1062 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "rpc/mining.h" #include "amount.h" #include "chain.h" #include "chainparams.h" #include "config.h" #include "consensus/consensus.h" #include "consensus/params.h" #include "consensus/validation.h" #include "core_io.h" #include "dstencode.h" #include "init.h" #include "miner.h" #include "net.h" #include "policy/policy.h" #include "pow.h" #include "rpc/blockchain.h" #include "rpc/server.h" #include "txmempool.h" #include "util.h" #include "utilstrencodings.h" #include "validation.h" #include "validationinterface.h" #include #include #include /** * Return average network hashes per second based on the last 'lookup' blocks, * or from the last difficulty change if 'lookup' is nonpositive. If 'height' is * nonnegative, compute the estimate at the time when a given block was found. */ static UniValue GetNetworkHashPS(int lookup, int height) { CBlockIndex *pb = chainActive.Tip(); if (height >= 0 && height < chainActive.Height()) { pb = chainActive[height]; } if (pb == nullptr || !pb->nHeight) { return 0; } // If lookup is -1, then use blocks since last difficulty change. if (lookup <= 0) { lookup = pb->nHeight % Params().GetConsensus().DifficultyAdjustmentInterval() + 1; } // If lookup is larger than chain, then set it to chain length. if (lookup > pb->nHeight) { lookup = pb->nHeight; } CBlockIndex *pb0 = pb; int64_t minTime = pb0->GetBlockTime(); int64_t maxTime = minTime; for (int i = 0; i < lookup; i++) { pb0 = pb0->pprev; int64_t time = pb0->GetBlockTime(); minTime = std::min(time, minTime); maxTime = std::max(time, maxTime); } // In case there's a situation where minTime == maxTime, we don't want a // divide by zero exception. if (minTime == maxTime) { return 0; } arith_uint256 workDiff = pb->nChainWork - pb0->nChainWork; int64_t timeDiff = maxTime - minTime; return workDiff.getdouble() / timeDiff; } static UniValue getnetworkhashps(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 2) { throw std::runtime_error( "getnetworkhashps ( nblocks height )\n" "\nReturns the estimated network hashes per second based on the " "last n blocks.\n" "Pass in [blocks] to override # of blocks, -1 specifies since last " "difficulty change.\n" "Pass in [height] to estimate the network speed at the time when a " "certain block was found.\n" "\nArguments:\n" "1. nblocks (numeric, optional, default=120) The number of " "blocks, or -1 for blocks since last difficulty change.\n" "2. 
height (numeric, optional, default=-1) To estimate at the " "time of the given height.\n" "\nResult:\n" "x (numeric) Hashes per second estimated\n" "\nExamples:\n" + HelpExampleCli("getnetworkhashps", "") + HelpExampleRpc("getnetworkhashps", "")); } LOCK(cs_main); return GetNetworkHashPS( request.params.size() > 0 ? request.params[0].get_int() : 120, request.params.size() > 1 ? request.params[1].get_int() : -1); } UniValue generateBlocks(const Config &config, std::shared_ptr coinbaseScript, int nGenerate, uint64_t nMaxTries, bool keepScript) { static const int nInnerLoopCount = 0x100000; int nHeightStart = 0; int nHeightEnd = 0; int nHeight = 0; { // Don't keep cs_main locked. LOCK(cs_main); nHeightStart = chainActive.Height(); nHeight = nHeightStart; nHeightEnd = nHeightStart + nGenerate; } unsigned int nExtraNonce = 0; UniValue blockHashes(UniValue::VARR); while (nHeight < nHeightEnd) { std::unique_ptr pblocktemplate( BlockAssembler(config).CreateNewBlock( coinbaseScript->reserveScript)); if (!pblocktemplate.get()) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block"); } CBlock *pblock = &pblocktemplate->block; { LOCK(cs_main); IncrementExtraNonce(config, pblock, chainActive.Tip(), nExtraNonce); } while (nMaxTries > 0 && pblock->nNonce < nInnerLoopCount && !CheckProofOfWork(pblock->GetHash(), pblock->nBits, config)) { ++pblock->nNonce; --nMaxTries; } if (nMaxTries == 0) { break; } if (pblock->nNonce == nInnerLoopCount) { continue; } std::shared_ptr shared_pblock = std::make_shared(*pblock); if (!ProcessNewBlock(config, shared_pblock, true, nullptr)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted"); } ++nHeight; blockHashes.push_back(pblock->GetHash().GetHex()); // Mark script as important because it was used at least for one // coinbase output if the script came from the wallet. if (keepScript) { coinbaseScript->KeepScript(); } } return blockHashes; } static UniValue generatetoaddress(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 2 || request.params.size() > 3) { throw std::runtime_error( "generatetoaddress nblocks address (maxtries)\n" "\nMine blocks immediately to a specified address (before the RPC " "call returns)\n" "\nArguments:\n" "1. nblocks (numeric, required) How many blocks are generated " "immediately.\n" "2. address (string, required) The address to send the newly " "generated bitcoin to.\n" "3. maxtries (numeric, optional) How many iterations to try " "(default = 1000000).\n" "\nResult:\n" "[ blockhashes ] (array) hashes of blocks generated\n" "\nExamples:\n" "\nGenerate 11 blocks to myaddress\n" + HelpExampleCli("generatetoaddress", "11 \"myaddress\"")); } int nGenerate = request.params[0].get_int(); uint64_t nMaxTries = 1000000; if (request.params.size() > 2) { nMaxTries = request.params[2].get_int(); } CTxDestination destination = DecodeDestination(request.params[1].get_str()); if (!IsValidDestination(destination)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Error: Invalid address"); } std::shared_ptr coinbaseScript = std::make_shared(); coinbaseScript->reserveScript = GetScriptForDestination(destination); return generateBlocks(config, coinbaseScript, nGenerate, nMaxTries, false); } static UniValue getmininginfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmininginfo\n" "\nReturns a json object containing mining-related information." 
"\nResult:\n" "{\n" " \"blocks\": nnn, (numeric) The current block\n" " \"currentblocksize\": nnn, (numeric) The last block size\n" " \"currentblocktx\": nnn, (numeric) The last block " "transaction\n" " \"difficulty\": xxx.xxxxx (numeric) The current difficulty\n" " \"errors\": \"...\" (string) Current errors\n" " \"networkhashps\": nnn, (numeric) The network hashes per " "second\n" " \"pooledtx\": n (numeric) The size of the mempool\n" " \"chain\": \"xxxx\", (string) current network name as " "defined in BIP70 (main, test, regtest)\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmininginfo", "") + HelpExampleRpc("getmininginfo", "")); } LOCK(cs_main); UniValue obj(UniValue::VOBJ); obj.push_back(Pair("blocks", int(chainActive.Height()))); obj.push_back(Pair("currentblocksize", uint64_t(nLastBlockSize))); obj.push_back(Pair("currentblocktx", uint64_t(nLastBlockTx))); obj.push_back(Pair("difficulty", double(GetDifficulty(chainActive.Tip())))); obj.push_back( Pair("blockprioritypercentage", uint8_t(gArgs.GetArg("-blockprioritypercentage", DEFAULT_BLOCK_PRIORITY_PERCENTAGE)))); obj.push_back(Pair("errors", GetWarnings("statusbar"))); obj.push_back(Pair("networkhashps", getnetworkhashps(config, request))); obj.push_back(Pair("pooledtx", uint64_t(mempool.size()))); - obj.push_back(Pair("chain", Params().NetworkIDString())); + obj.push_back(Pair("chain", config.GetChainParams().NetworkIDString())); return obj; } // NOTE: Unlike wallet RPC (which use BCH values), mining RPCs follow GBT (BIP // 22) in using satoshi amounts static UniValue prioritisetransaction(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 3) { throw std::runtime_error( "prioritisetransaction \n" "Accepts the transaction into mined blocks at a higher (or lower) " "priority\n" "\nArguments:\n" "1. \"txid\" (string, required) The transaction id.\n" "2. priority_delta (numeric, required) The priority to add or " "subtract.\n" " The transaction selection algorithm considers " "the tx as it would have a higher priority.\n" " (priority of a transaction is calculated: " "coinage * value_in_satoshis / txsize) \n" "3. fee_delta (numeric, required) The fee value (in satoshis) " "to add (or subtract, if negative).\n" " The fee is not actually paid, only the " "algorithm for selecting transactions into a block\n" " considers the transaction as it would have paid " "a higher (or lower) fee.\n" "\nResult:\n" "true (boolean) Returns true\n" "\nExamples:\n" + HelpExampleCli("prioritisetransaction", "\"txid\" 0.0 10000") + HelpExampleRpc("prioritisetransaction", "\"txid\", 0.0, 10000")); } LOCK(cs_main); uint256 hash = ParseHashStr(request.params[0].get_str(), "txid"); Amount nAmount(request.params[2].get_int64()); mempool.PrioritiseTransaction(hash, request.params[0].get_str(), request.params[1].get_real(), nAmount); return true; } // NOTE: Assumes a conclusive result; if result is inconclusive, it must be // handled by caller static UniValue BIP22ValidationResult(const Config &config, const CValidationState &state) { if (state.IsValid()) { return NullUniValue; } std::string strRejectReason = state.GetRejectReason(); if (state.IsError()) { throw JSONRPCError(RPC_VERIFY_ERROR, strRejectReason); } if (state.IsInvalid()) { if (strRejectReason.empty()) { return "rejected"; } return strRejectReason; } // Should be impossible. 
return "valid?"; } std::string gbt_vb_name(const Consensus::DeploymentPos pos) { const struct BIP9DeploymentInfo &vbinfo = VersionBitsDeploymentInfo[pos]; std::string s = vbinfo.name; if (!vbinfo.gbt_force) { s.insert(s.begin(), '!'); } return s; } static UniValue getblocktemplate(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() > 1) { throw std::runtime_error( "getblocktemplate ( TemplateRequest )\n" "\nIf the request parameters include a 'mode' key, that is used to " "explicitly select between the default 'template' request or a " "'proposal'.\n" "It returns data needed to construct a block to work on.\n" "For full specification, see BIPs 22, 23, 9, and 145:\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0022.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0023.mediawiki\n" " " "https://github.com/bitcoin/bips/blob/master/" "bip-0009.mediawiki#getblocktemplate_changes\n" " " "https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki\n" "\nArguments:\n" "1. template_request (json object, optional) A json object " "in the following spec\n" " {\n" " \"mode\":\"template\" (string, optional) This must be " "set to \"template\", \"proposal\" (see BIP 23), or omitted\n" " \"capabilities\":[ (array, optional) A list of " "strings\n" " \"support\" (string) client side supported " "feature, 'longpoll', 'coinbasetxn', 'coinbasevalue', 'proposal', " "'serverlist', 'workid'\n" " ,...\n" " ],\n" " \"rules\":[ (array, optional) A list of " "strings\n" " \"support\" (string) client side supported " "softfork deployment\n" " ,...\n" " ]\n" " }\n" "\n" "\nResult:\n" "{\n" " \"version\" : n, (numeric) The preferred " "block version\n" " \"rules\" : [ \"rulename\", ... ], (array of strings) " "specific block rules that are to be enforced\n" " \"vbavailable\" : { (json object) set of " "pending, supported versionbit (BIP 9) softfork deployments\n" " \"rulename\" : bitnumber (numeric) identifies the " "bit number as indicating acceptance and readiness for the named " "softfork rule\n" " ,...\n" " },\n" " \"vbrequired\" : n, (numeric) bit mask of " "versionbits the server requires set in submissions\n" " \"previousblockhash\" : \"xxxx\", (string) The hash of " "current highest block\n" " \"transactions\" : [ (array) contents of " "non-coinbase transactions that should be included in the next " "block\n" " {\n" " \"data\" : \"xxxx\", (string) transaction " "data encoded in hexadecimal (byte-for-byte)\n" " \"txid\" : \"xxxx\", (string) transaction id " "encoded in little-endian hexadecimal\n" " \"hash\" : \"xxxx\", (string) hash encoded " "in little-endian hexadecimal (including witness data)\n" " \"depends\" : [ (array) array of numbers " "\n" " n (numeric) transactions " "before this one (by 1-based index in 'transactions' list) that " "must be present in the final block if this one is\n" " ,...\n" " ],\n" " \"fee\": n, (numeric) difference in " "value between transaction inputs and outputs (in Satoshis); for " "coinbase transactions, this is a negative Number of the total " "collected block fees (ie, not including the block subsidy); if " "key is not present, fee is unknown and clients MUST NOT assume " "there isn't one\n" " \"sigops\" : n, (numeric) total SigOps " "cost, as counted for purposes of block limits; if key is not " "present, sigop cost is unknown and clients MUST NOT assume it is " "zero\n" " \"required\" : true|false (boolean) if provided and " "true, this transaction must be in the final block\n" " }\n" " ,...\n" " 
],\n" " \"coinbaseaux\" : { (json object) data that " "should be included in the coinbase's scriptSig content\n" " \"flags\" : \"xx\" (string) key name is to " "be ignored, and value included in scriptSig\n" " },\n" " \"coinbasevalue\" : n, (numeric) maximum allowable " "input to coinbase transaction, including the generation award and " "transaction fees (in Satoshis)\n" " \"coinbasetxn\" : { ... }, (json object) information " "for coinbase transaction\n" " \"target\" : \"xxxx\", (string) The hash target\n" " \"mintime\" : xxx, (numeric) The minimum " "timestamp appropriate for next block time in seconds since epoch " "(Jan 1 1970 GMT)\n" " \"mutable\" : [ (array of string) list of " "ways the block template may be changed \n" " \"value\" (string) A way the block " "template may be changed, e.g. 'time', 'transactions', " "'prevblock'\n" " ,...\n" " ],\n" " \"noncerange\" : \"00000000ffffffff\",(string) A range of valid " "nonces\n" " \"sigoplimit\" : n, (numeric) limit of sigops " "in blocks\n" " \"sizelimit\" : n, (numeric) limit of block " "size\n" " \"curtime\" : ttt, (numeric) current timestamp " "in seconds since epoch (Jan 1 1970 GMT)\n" " \"bits\" : \"xxxxxxxx\", (string) compressed " "target of next block\n" " \"height\" : n (numeric) The height of the " "next block\n" "}\n" "\nExamples:\n" + HelpExampleCli("getblocktemplate", "") + HelpExampleRpc("getblocktemplate", "")); } LOCK(cs_main); std::string strMode = "template"; UniValue lpval = NullUniValue; std::set setClientRules; int64_t nMaxVersionPreVB = -1; if (request.params.size() > 0) { const UniValue &oparam = request.params[0].get_obj(); const UniValue &modeval = find_value(oparam, "mode"); if (modeval.isStr()) { strMode = modeval.get_str(); } else if (modeval.isNull()) { /* Do nothing */ } else { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } lpval = find_value(oparam, "longpollid"); if (strMode == "proposal") { const UniValue &dataval = find_value(oparam, "data"); if (!dataval.isStr()) { throw JSONRPCError(RPC_TYPE_ERROR, "Missing data String key for proposal"); } CBlock block; if (!DecodeHexBlk(block, dataval.get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } uint256 hash = block.GetHash(); BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = mi->second; if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) { return "duplicate"; } if (pindex->nStatus & BLOCK_FAILED_MASK) { return "duplicate-invalid"; } return "duplicate-inconclusive"; } CBlockIndex *const pindexPrev = chainActive.Tip(); // TestBlockValidity only supports blocks built on the current Tip if (block.hashPrevBlock != pindexPrev->GetBlockHash()) { return "inconclusive-not-best-prevblk"; } CValidationState state; TestBlockValidity(config, state, block, pindexPrev, false, true); return BIP22ValidationResult(config, state); } const UniValue &aClientRules = find_value(oparam, "rules"); if (aClientRules.isArray()) { for (size_t i = 0; i < aClientRules.size(); ++i) { const UniValue &v = aClientRules[i]; setClientRules.insert(v.get_str()); } } else { // NOTE: It is important that this NOT be read if versionbits is // supported const UniValue &uvMaxVersion = find_value(oparam, "maxversion"); if (uvMaxVersion.isNum()) { nMaxVersionPreVB = uvMaxVersion.get_int64(); } } } if (strMode != "template") { throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode"); } if (!g_connman) { throw JSONRPCError( RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); } if 
(g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL) == 0) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Bitcoin is not connected!"); } if (IsInitialBlockDownload()) { throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, "Bitcoin is downloading blocks..."); } static unsigned int nTransactionsUpdatedLast; if (!lpval.isNull()) { // Wait to respond until either the best block changes, OR a minute has // passed and there are more transactions uint256 hashWatchedChain; boost::system_time checktxtime; unsigned int nTransactionsUpdatedLastLP; if (lpval.isStr()) { // Format: std::string lpstr = lpval.get_str(); hashWatchedChain.SetHex(lpstr.substr(0, 64)); nTransactionsUpdatedLastLP = atoi64(lpstr.substr(64)); } else { // NOTE: Spec does not specify behaviour for non-string longpollid, // but this makes testing easier hashWatchedChain = chainActive.Tip()->GetBlockHash(); nTransactionsUpdatedLastLP = nTransactionsUpdatedLast; } // Release the wallet and main lock while waiting LEAVE_CRITICAL_SECTION(cs_main); { checktxtime = boost::get_system_time() + boost::posix_time::minutes(1); boost::unique_lock lock(csBestBlock); while (chainActive.Tip()->GetBlockHash() == hashWatchedChain && IsRPCRunning()) { if (!cvBlockChange.timed_wait(lock, checktxtime)) { // Timeout: Check transactions for update if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) { break; } checktxtime += boost::posix_time::seconds(10); } } } ENTER_CRITICAL_SECTION(cs_main); if (!IsRPCRunning()) { throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down"); } // TODO: Maybe recheck connections/IBD and (if something wrong) send an // expires-immediately template to stop miners? } // Update block static CBlockIndex *pindexPrev; static int64_t nStart; static std::unique_ptr pblocktemplate; if (pindexPrev != chainActive.Tip() || (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - nStart > 5)) { // Clear pindexPrev so future calls make a new block, despite any // failures from here on pindexPrev = nullptr; // Store the pindexBest used before CreateNewBlock, to avoid races nTransactionsUpdatedLast = mempool.GetTransactionsUpdated(); CBlockIndex *pindexPrevNew = chainActive.Tip(); nStart = GetTime(); // Create new block CScript scriptDummy = CScript() << OP_TRUE; pblocktemplate = BlockAssembler(config).CreateNewBlock(scriptDummy); if (!pblocktemplate) { throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory"); } // Need to update only after we know CreateNewBlock succeeded pindexPrev = pindexPrevNew; } // pointer for convenience CBlock *pblock = &pblocktemplate->block; const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); // Update nTime UpdateTime(pblock, config, pindexPrev); pblock->nNonce = 0; UniValue aCaps(UniValue::VARR); aCaps.push_back("proposal"); UniValue transactions(UniValue::VARR); std::map setTxIndex; int i = 0; for (const auto &it : pblock->vtx) { const CTransaction &tx = *it; uint256 txId = tx.GetId(); setTxIndex[txId] = i++; if (tx.IsCoinBase()) { continue; } UniValue entry(UniValue::VOBJ); entry.push_back(Pair("data", EncodeHexTx(tx))); entry.push_back(Pair("txid", txId.GetHex())); entry.push_back(Pair("hash", tx.GetHash().GetHex())); UniValue deps(UniValue::VARR); for (const CTxIn &in : tx.vin) { if (setTxIndex.count(in.prevout.hash)) deps.push_back(setTxIndex[in.prevout.hash]); } entry.push_back(Pair("depends", deps)); int index_in_template = i - 1; entry.push_back(Pair( "fee", pblocktemplate->vTxFees[index_in_template].GetSatoshis())); int64_t nTxSigOps 
= pblocktemplate->vTxSigOpsCount[index_in_template]; entry.push_back(Pair("sigops", nTxSigOps)); transactions.push_back(entry); } UniValue aux(UniValue::VOBJ); aux.push_back( Pair("flags", HexStr(COINBASE_FLAGS.begin(), COINBASE_FLAGS.end()))); arith_uint256 hashTarget = arith_uint256().SetCompact(pblock->nBits); UniValue aMutable(UniValue::VARR); aMutable.push_back("time"); aMutable.push_back("transactions"); aMutable.push_back("prevblock"); UniValue result(UniValue::VOBJ); result.push_back(Pair("capabilities", aCaps)); UniValue aRules(UniValue::VARR); UniValue vbavailable(UniValue::VOBJ); for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { Consensus::DeploymentPos pos = Consensus::DeploymentPos(j); ThresholdState state = VersionBitsState(pindexPrev, consensusParams, pos, versionbitscache); switch (state) { case THRESHOLD_DEFINED: case THRESHOLD_FAILED: // Not exposed to GBT at all break; case THRESHOLD_LOCKED_IN: { // Ensure bit is set in block version, then fallthrough to get // vbavailable set. pblock->nVersion |= VersionBitsMask(consensusParams, pos); } // FALLTHROUGH case THRESHOLD_STARTED: { const struct BIP9DeploymentInfo &vbinfo = VersionBitsDeploymentInfo[pos]; vbavailable.push_back(Pair( gbt_vb_name(pos), consensusParams.vDeployments[pos].bit)); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { if (!vbinfo.gbt_force) { // If the client doesn't support this, don't indicate it // in the [default] version pblock->nVersion &= ~VersionBitsMask(consensusParams, pos); } } break; } case THRESHOLD_ACTIVE: { // Add to rules only const struct BIP9DeploymentInfo &vbinfo = VersionBitsDeploymentInfo[pos]; aRules.push_back(gbt_vb_name(pos)); if (setClientRules.find(vbinfo.name) == setClientRules.end()) { // Not supported by the client; make sure it's safe to // proceed if (!vbinfo.gbt_force) { // If we do anything other than throw an exception here, // be sure version/force isn't sent to old clients throw JSONRPCError( RPC_INVALID_PARAMETER, strprintf("Support for '%s' rule requires explicit " "client support", vbinfo.name)); } } break; } } } result.push_back(Pair("version", pblock->nVersion)); result.push_back(Pair("rules", aRules)); result.push_back(Pair("vbavailable", vbavailable)); result.push_back(Pair("vbrequired", int(0))); if (nMaxVersionPreVB >= 2) { // If VB is supported by the client, nMaxVersionPreVB is -1, so we won't // get here. Because BIP 34 changed how the generation transaction is // serialized, we can only use version/force back to v2 blocks. This is // safe to do [otherwise-]unconditionally only because we are throwing // an exception above if a non-force deployment gets activated. Note // that this can probably also be removed entirely after the first BIP9 // non-force deployment (ie, probably segwit) gets activated. aMutable.push_back("version/force"); } result.push_back(Pair("previousblockhash", pblock->hashPrevBlock.GetHex())); result.push_back(Pair("transactions", transactions)); result.push_back(Pair("coinbaseaux", aux)); result.push_back( Pair("coinbasevalue", (int64_t)pblock->vtx[0]->vout[0].nValue.GetSatoshis())); result.push_back(Pair("longpollid", chainActive.Tip()->GetBlockHash().GetHex() + i64tostr(nTransactionsUpdatedLast))); result.push_back(Pair("target", hashTarget.GetHex())); result.push_back( Pair("mintime", (int64_t)pindexPrev->GetMedianTimePast() + 1)); result.push_back(Pair("mutable", aMutable)); result.push_back(Pair("noncerange", "00000000ffffffff")); // FIXME: Allow for mining block greater than 1M. 
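    // Note that both limits reported below are derived from
    // DEFAULT_MAX_BLOCK_SIZE rather than from the node's configured maximum
    // block size.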
result.push_back( Pair("sigoplimit", GetMaxBlockSigOpsCount(DEFAULT_MAX_BLOCK_SIZE))); result.push_back(Pair("sizelimit", DEFAULT_MAX_BLOCK_SIZE)); result.push_back(Pair("curtime", pblock->GetBlockTime())); result.push_back(Pair("bits", strprintf("%08x", pblock->nBits))); result.push_back(Pair("height", (int64_t)(pindexPrev->nHeight + 1))); return result; } class submitblock_StateCatcher : public CValidationInterface { public: uint256 hash; bool found; CValidationState state; submitblock_StateCatcher(const uint256 &hashIn) : hash(hashIn), found(false), state() {} protected: void BlockChecked(const CBlock &block, const CValidationState &stateIn) override { if (block.GetHash() != hash) { return; } found = true; state = stateIn; } }; static UniValue submitblock(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() < 1 || request.params.size() > 2) { throw std::runtime_error( "submitblock \"hexdata\" ( \"jsonparametersobject\" )\n" "\nAttempts to submit new block to network.\n" "The 'jsonparametersobject' parameter is currently ignored.\n" "See https://en.bitcoin.it/wiki/BIP_0022 for full specification.\n" "\nArguments\n" "1. \"hexdata\" (string, required) the hex-encoded block " "data to submit\n" "2. \"parameters\" (string, optional) object of optional " "parameters\n" " {\n" " \"workid\" : \"id\" (string, optional) if the server " "provided a workid, it MUST be included with submissions\n" " }\n" "\nResult:\n" "\nExamples:\n" + HelpExampleCli("submitblock", "\"mydata\"") + HelpExampleRpc("submitblock", "\"mydata\"")); } std::shared_ptr blockptr = std::make_shared(); CBlock &block = *blockptr; if (!DecodeHexBlk(block, request.params[0].get_str())) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); } if (block.vtx.empty() || !block.vtx[0]->IsCoinBase()) { throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block does not start with a coinbase"); } uint256 hash = block.GetHash(); bool fBlockPresent = false; { LOCK(cs_main); BlockMap::iterator mi = mapBlockIndex.find(hash); if (mi != mapBlockIndex.end()) { CBlockIndex *pindex = mi->second; if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) { return "duplicate"; } if (pindex->nStatus & BLOCK_FAILED_MASK) { return "duplicate-invalid"; } // Otherwise, we might only have the header - process the block // before returning fBlockPresent = true; } } submitblock_StateCatcher sc(block.GetHash()); RegisterValidationInterface(&sc); bool fAccepted = ProcessNewBlock(config, blockptr, true, nullptr); UnregisterValidationInterface(&sc); if (fBlockPresent) { if (fAccepted && !sc.found) { return "duplicate-inconclusive"; } return "duplicate"; } if (!sc.found) { return "inconclusive"; } return BIP22ValidationResult(config, sc.state); } static UniValue estimatefee(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "estimatefee nblocks\n" "\nEstimates the approximate fee per kilobyte needed for a " "transaction to begin\n" "confirmation within nblocks blocks.\n" "\nArguments:\n" "1. 
nblocks (numeric, required)\n" "\nResult:\n" "n (numeric) estimated fee-per-kilobyte\n" "\n" "A negative value is returned if not enough transactions and " "blocks\n" "have been observed to make an estimate.\n" "-1 is always returned for nblocks == 1 as it is impossible to " "calculate\n" "a fee that is high enough to get reliably included in the next " "block.\n" "\nExample:\n" + HelpExampleCli("estimatefee", "6")); } RPCTypeCheck(request.params, {UniValue::VNUM}); int nBlocks = request.params[0].get_int(); if (nBlocks < 1) { nBlocks = 1; } CFeeRate feeRate = mempool.estimateFee(nBlocks); if (feeRate == CFeeRate(Amount(0))) { return -1.0; } return ValueFromAmount(feeRate.GetFeePerK()); } static UniValue estimatepriority(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "estimatepriority nblocks\n" "\nDEPRECATED. Estimates the approximate priority " "a zero-fee transaction needs to begin\n" "confirmation within nblocks blocks.\n" "\nArguments:\n" "1. nblocks (numeric, required)\n" "\nResult:\n" "n (numeric) estimated priority\n" "\n" "A negative value is returned if not enough " "transactions and blocks\n" "have been observed to make an estimate.\n" "\nExample:\n" + HelpExampleCli("estimatepriority", "6")); } RPCTypeCheck(request.params, {UniValue::VNUM}); int nBlocks = request.params[0].get_int(); if (nBlocks < 1) { nBlocks = 1; } return mempool.estimatePriority(nBlocks); } static UniValue estimatesmartfee(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "estimatesmartfee nblocks\n" "\nWARNING: This interface is unstable and may disappear or " "change!\n" "\nEstimates the approximate fee per kilobyte needed for a " "transaction to begin\n" "confirmation within nblocks blocks if possible and return the " "number of blocks\n" "for which the estimate is valid.\n" "\nArguments:\n" "1. nblocks (numeric)\n" "\nResult:\n" "{\n" " \"feerate\" : x.x, (numeric) estimate fee-per-kilobyte (in " "BCH)\n" " \"blocks\" : n (numeric) block number where estimate " "was found\n" "}\n" "\n" "A negative value is returned if not enough transactions and " "blocks\n" "have been observed to make an estimate for any number of blocks.\n" "However it will not return a value below the mempool reject fee.\n" "\nExample:\n" + HelpExampleCli("estimatesmartfee", "6")); } RPCTypeCheck(request.params, {UniValue::VNUM}); int nBlocks = request.params[0].get_int(); UniValue result(UniValue::VOBJ); int answerFound; CFeeRate feeRate = mempool.estimateSmartFee(nBlocks, &answerFound); result.push_back(Pair("feerate", feeRate == CFeeRate(Amount(0)) ? -1.0 : ValueFromAmount(feeRate.GetFeePerK()))); result.push_back(Pair("blocks", answerFound)); return result; } static UniValue estimatesmartpriority(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "estimatesmartpriority nblocks\n" "\nDEPRECATED. WARNING: This interface is unstable and may " "disappear or change!\n" "\nEstimates the approximate priority a zero-fee transaction needs " "to begin\n" "confirmation within nblocks blocks if possible and return the " "number of blocks\n" "for which the estimate is valid.\n" "\nArguments:\n" "1. 
nblocks (numeric, required)\n" "\nResult:\n" "{\n" " \"priority\" : x.x, (numeric) estimated priority\n" " \"blocks\" : n (numeric) block number where estimate " "was found\n" "}\n" "\n" "A negative value is returned if not enough transactions and " "blocks\n" "have been observed to make an estimate for any number of blocks.\n" "However if the mempool reject fee is set it will return 1e9 * " "MAX_MONEY.\n" "\nExample:\n" + HelpExampleCli("estimatesmartpriority", "6")); } RPCTypeCheck(request.params, {UniValue::VNUM}); int nBlocks = request.params[0].get_int(); UniValue result(UniValue::VOBJ); int answerFound; double priority = mempool.estimateSmartPriority(nBlocks, &answerFound); result.push_back(Pair("priority", priority)); result.push_back(Pair("blocks", answerFound)); return result; } // clang-format off static const CRPCCommand commands[] = { // category name actor (function) okSafeMode // ---------- ------------------------ ---------------------- ---------- {"mining", "getnetworkhashps", getnetworkhashps, true, {"nblocks", "height"}}, {"mining", "getmininginfo", getmininginfo, true, {}}, {"mining", "prioritisetransaction", prioritisetransaction, true, {"txid", "priority_delta", "fee_delta"}}, {"mining", "getblocktemplate", getblocktemplate, true, {"template_request"}}, {"mining", "submitblock", submitblock, true, {"hexdata", "parameters"}}, {"generating", "generatetoaddress", generatetoaddress, true, {"nblocks", "address", "maxtries"}}, {"util", "estimatefee", estimatefee, true, {"nblocks"}}, {"util", "estimatepriority", estimatepriority, true, {"nblocks"}}, {"util", "estimatesmartfee", estimatesmartfee, true, {"nblocks"}}, {"util", "estimatesmartpriority", estimatesmartpriority, true, {"nblocks"}}, }; // clang-format on void RegisterMiningRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) t.appendCommand(commands[vcidx].name, &commands[vcidx]); } diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index 9a0b58f10..e92e634a0 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -1,648 +1,649 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "rpc/misc.h" #include "base58.h" #include "clientversion.h" #include "config.h" #include "dstencode.h" #include "init.h" #include "net.h" #include "netbase.h" #include "rpc/blockchain.h" #include "rpc/server.h" #include "timedata.h" #include "util.h" #include "utilstrencodings.h" #include "validation.h" #ifdef ENABLE_WALLET #include "wallet/rpcwallet.h" #include "wallet/wallet.h" #include "wallet/walletdb.h" #endif #include #include /** * @note Do not add or change anything in the information returned by this * method. `getinfo` exists for backwards-compatibility only. It combines * information from wildly different sources in the program, which is a mess, * and is thus planned to be deprecated eventually. * * Based on the source of the information, new information should be added to: * - `getblockchaininfo`, * - `getnetworkinfo` or * - `getwalletinfo` * * Or alternatively, create a specific query method for the information. **/ static UniValue getinfo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getinfo\n" "\nDEPRECATED. 
Returns an object containing various state info.\n" "\nResult:\n" "{\n" " \"version\": xxxxx, (numeric) the server version\n" " \"protocolversion\": xxxxx, (numeric) the protocol version\n" " \"walletversion\": xxxxx, (numeric) the wallet version\n" " \"balance\": xxxxxxx, (numeric) the total bitcoin " "balance of the wallet\n" " \"blocks\": xxxxxx, (numeric) the current number of " "blocks processed in the server\n" " \"timeoffset\": xxxxx, (numeric) the time offset\n" " \"connections\": xxxxx, (numeric) the number of " "connections\n" " \"proxy\": \"host:port\", (string, optional) the proxy used " "by the server\n" " \"difficulty\": xxxxxx, (numeric) the current difficulty\n" " \"testnet\": true|false, (boolean) if the server is using " "testnet or not\n" " \"keypoololdest\": xxxxxx, (numeric) the timestamp (seconds " "since Unix epoch) of the oldest pre-generated key in the key " "pool\n" " \"keypoolsize\": xxxx, (numeric) how many new keys are " "pre-generated\n" " \"unlocked_until\": ttt, (numeric) the timestamp in " "seconds since epoch (midnight Jan 1 1970 GMT) that the wallet is " "unlocked for transfers, or 0 if the wallet is locked\n" " \"paytxfee\": x.xxxx, (numeric) the transaction fee set " "in " + CURRENCY_UNIT + "/kB\n" " \"relayfee\": x.xxxx, (numeric) minimum " "relay fee for non-free transactions in " + CURRENCY_UNIT + "/kB\n" " \"errors\": \"...\" (string) any error messages\n" "}\n" "\nExamples:\n" + HelpExampleCli("getinfo", "") + HelpExampleRpc("getinfo", "")); } #ifdef ENABLE_WALLET CWallet *const pwallet = GetWalletForJSONRPCRequest(request); LOCK2(cs_main, pwallet ? &pwallet->cs_wallet : nullptr); #else LOCK(cs_main); #endif proxyType proxy; GetProxy(NET_IPV4, proxy); UniValue obj(UniValue::VOBJ); obj.push_back(Pair("version", CLIENT_VERSION)); obj.push_back(Pair("protocolversion", PROTOCOL_VERSION)); #ifdef ENABLE_WALLET if (pwallet) { obj.push_back(Pair("walletversion", pwallet->GetVersion())); obj.push_back(Pair("balance", ValueFromAmount(pwallet->GetBalance()))); } #endif obj.push_back(Pair("blocks", (int)chainActive.Height())); obj.push_back(Pair("timeoffset", GetTimeOffset())); if (g_connman) { obj.push_back( Pair("connections", (int)g_connman->GetNodeCount(CConnman::CONNECTIONS_ALL))); } obj.push_back(Pair("proxy", (proxy.IsValid() ? 
proxy.proxy.ToStringIPPort() : std::string()))); obj.push_back(Pair("difficulty", double(GetDifficulty(chainActive.Tip())))); - obj.push_back(Pair( - "testnet", Params().NetworkIDString() == CBaseChainParams::TESTNET)); + obj.push_back(Pair("testnet", + config.GetChainParams().NetworkIDString() == + CBaseChainParams::TESTNET)); #ifdef ENABLE_WALLET if (pwallet) { obj.push_back(Pair("keypoololdest", pwallet->GetOldestKeyPoolTime())); obj.push_back(Pair("keypoolsize", (int)pwallet->GetKeyPoolSize())); } if (pwallet && pwallet->IsCrypted()) { obj.push_back(Pair("unlocked_until", pwallet->nRelockTime)); } obj.push_back(Pair("paytxfee", ValueFromAmount(payTxFee.GetFeePerK()))); #endif obj.push_back( Pair("relayfee", ValueFromAmount(::minRelayTxFee.GetFeePerK()))); obj.push_back(Pair("errors", GetWarnings("statusbar"))); return obj; } #ifdef ENABLE_WALLET class DescribeAddressVisitor : public boost::static_visitor<UniValue> { public: CWallet *const pwallet; DescribeAddressVisitor(CWallet *_pwallet) : pwallet(_pwallet) {} UniValue operator()(const CNoDestination &dest) const { return UniValue(UniValue::VOBJ); } UniValue operator()(const CKeyID &keyID) const { UniValue obj(UniValue::VOBJ); CPubKey vchPubKey; obj.push_back(Pair("isscript", false)); if (pwallet && pwallet->GetPubKey(keyID, vchPubKey)) { obj.push_back(Pair("pubkey", HexStr(vchPubKey))); obj.push_back(Pair("iscompressed", vchPubKey.IsCompressed())); } return obj; } UniValue operator()(const CScriptID &scriptID) const { UniValue obj(UniValue::VOBJ); CScript subscript; obj.push_back(Pair("isscript", true)); if (pwallet && pwallet->GetCScript(scriptID, subscript)) { std::vector<CTxDestination> addresses; txnouttype whichType; int nRequired; ExtractDestinations(subscript, whichType, addresses, nRequired); obj.push_back(Pair("script", GetTxnOutputType(whichType))); obj.push_back( Pair("hex", HexStr(subscript.begin(), subscript.end()))); UniValue a(UniValue::VARR); for (const CTxDestination &addr : addresses) { a.push_back(EncodeDestination(addr)); } obj.push_back(Pair("addresses", a)); if (whichType == TX_MULTISIG) { obj.push_back(Pair("sigsrequired", nRequired)); } } return obj; } }; #endif static UniValue validateaddress(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "validateaddress \"address\"\n" "\nReturn information about the given bitcoin address.\n" "\nArguments:\n" "1. \"address\" (string, required) The bitcoin address to " "validate\n" "\nResult:\n" "{\n" " \"isvalid\" : true|false, (boolean) If the address is " "valid or not. If not, this is the only property returned.\n" " \"address\" : \"address\", (string) The bitcoin address " "validated\n" " \"scriptPubKey\" : \"hex\", (string) The hex encoded " "scriptPubKey generated by the address\n" " \"ismine\" : true|false, (boolean) If the address is " "yours or not\n" " \"iswatchonly\" : true|false, (boolean) If the address is " "watchonly\n" " \"isscript\" : true|false, (boolean) If the key is a " "script\n" " \"pubkey\" : \"publickeyhex\", (string) The hex value of the " "raw public key\n" " \"iscompressed\" : true|false, (boolean) If the address is " "compressed\n" " \"account\" : \"account\" (string) DEPRECATED.
The " "account associated with the address, \"\" is the default account\n" " \"timestamp\" : timestamp, (number, optional) The " "creation time of the key if available in seconds since epoch (Jan " "1 1970 GMT)\n" " \"hdkeypath\" : \"keypath\" (string, optional) The HD " "keypath if the key is HD and available\n" " \"hdmasterkeyid\" : \"\" (string, optional) The " "Hash160 of the HD master pubkey\n" "}\n" "\nExamples:\n" + HelpExampleCli("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\"") + HelpExampleRpc("validateaddress", "\"1PSSGeFHDnKNxiEyFrD1wcEaHr9hrQDDWc\"")); } #ifdef ENABLE_WALLET CWallet *const pwallet = GetWalletForJSONRPCRequest(request); LOCK2(cs_main, pwallet ? &pwallet->cs_wallet : nullptr); #else LOCK(cs_main); #endif CTxDestination dest = DecodeDestination(request.params[0].get_str()); bool isValid = IsValidDestination(dest); UniValue ret(UniValue::VOBJ); ret.push_back(Pair("isvalid", isValid)); if (isValid) { std::string currentAddress = EncodeDestination(dest); ret.push_back(Pair("address", currentAddress)); CScript scriptPubKey = GetScriptForDestination(dest); ret.push_back(Pair("scriptPubKey", HexStr(scriptPubKey.begin(), scriptPubKey.end()))); #ifdef ENABLE_WALLET isminetype mine = pwallet ? IsMine(*pwallet, dest) : ISMINE_NO; ret.push_back(Pair("ismine", (mine & ISMINE_SPENDABLE) ? true : false)); ret.push_back( Pair("iswatchonly", (mine & ISMINE_WATCH_ONLY) ? true : false)); UniValue detail = boost::apply_visitor(DescribeAddressVisitor(pwallet), dest); ret.pushKVs(detail); if (pwallet && pwallet->mapAddressBook.count(dest)) { ret.push_back(Pair("account", pwallet->mapAddressBook[dest].name)); } if (pwallet) { const auto &meta = pwallet->mapKeyMetadata; const CKeyID *keyID = boost::get(&dest); auto it = keyID ? meta.find(*keyID) : meta.end(); if (it == meta.end()) { it = meta.find(CScriptID(scriptPubKey)); } if (it != meta.end()) { ret.push_back(Pair("timestamp", it->second.nCreateTime)); if (!it->second.hdKeypath.empty()) { ret.push_back(Pair("hdkeypath", it->second.hdKeypath)); ret.push_back(Pair("hdmasterkeyid", it->second.hdMasterKeyID.GetHex())); } } } #endif } return ret; } // Needed even with !ENABLE_WALLET, to pass (ignored) pointers around class CWallet; /** * Used by addmultisigaddress / createmultisig: */ CScript createmultisig_redeemScript(CWallet *const pwallet, const UniValue ¶ms) { int nRequired = params[0].get_int(); const UniValue &keys = params[1].get_array(); // Gather public keys if (nRequired < 1) { throw std::runtime_error( "a multisignature address must require at least one key to redeem"); } if ((int)keys.size() < nRequired) { throw std::runtime_error( strprintf("not enough keys supplied " "(got %u keys, but need at least %d to redeem)", keys.size(), nRequired)); } if (keys.size() > 16) { throw std::runtime_error( "Number of addresses involved in the " "multisignature address creation > 16\nReduce the " "number"); } std::vector pubkeys; pubkeys.resize(keys.size()); for (size_t i = 0; i < keys.size(); i++) { const std::string &ks = keys[i].get_str(); #ifdef ENABLE_WALLET // Case 1: Bitcoin address and we have full public key: CTxDestination dest = DecodeDestination(ks); if (pwallet && IsValidDestination(dest)) { const CKeyID *keyID = boost::get(&dest); if (!keyID) { throw std::runtime_error( strprintf("%s does not refer to a key", ks)); } CPubKey vchPubKey; if (!pwallet->GetPubKey(*keyID, vchPubKey)) { throw std::runtime_error( strprintf("no full public key for address %s", ks)); } if (!vchPubKey.IsFullyValid()) { throw 
std::runtime_error(" Invalid public key: " + ks); } pubkeys[i] = vchPubKey; continue; } #endif // Case 2: hex public key if (IsHex(ks)) { CPubKey vchPubKey(ParseHex(ks)); if (!vchPubKey.IsFullyValid()) { throw std::runtime_error(" Invalid public key: " + ks); } pubkeys[i] = vchPubKey; } else { throw std::runtime_error(" Invalid public key: " + ks); } } CScript result = GetScriptForMultisig(nRequired, pubkeys); if (result.size() > MAX_SCRIPT_ELEMENT_SIZE) { throw std::runtime_error( strprintf("redeemScript exceeds size limit: %d > %d", result.size(), MAX_SCRIPT_ELEMENT_SIZE)); } return result; } static UniValue createmultisig(const Config &config, const JSONRPCRequest &request) { #ifdef ENABLE_WALLET CWallet *const pwallet = GetWalletForJSONRPCRequest(request); #else CWallet *const pwallet = nullptr; #endif if (request.fHelp || request.params.size() < 2 || request.params.size() > 2) { std::string msg = "createmultisig nrequired [\"key\",...]\n" "\nCreates a multi-signature address with n signature of m keys " "required.\n" "It returns a json object with the address and redeemScript.\n" "\nArguments:\n" "1. nrequired (numeric, required) The number of required " "signatures out of the n keys or addresses.\n" "2. \"keys\" (string, required) A json array of keys which " "are bitcoin addresses or hex-encoded public keys\n" " [\n" " \"key\" (string) bitcoin address or hex-encoded public " "key\n" " ,...\n" " ]\n" "\nResult:\n" "{\n" " \"address\":\"multisigaddress\", (string) The value of the new " "multisig address.\n" " \"redeemScript\":\"script\" (string) The string value of " "the hex-encoded redemption script.\n" "}\n" "\nExamples:\n" "\nCreate a multisig address from 2 addresses\n" + HelpExampleCli("createmultisig", "2 " "\"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\"," "\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\"") + "\nAs a json rpc call\n" + HelpExampleRpc("createmultisig", "2, " "\"[\\\"16sSauSf5pF2UkUwvKGq4qjNRzBZYqgEL5\\\"," "\\\"171sgjn4YtPu27adkKGrdDwzRTxnRkBfKV\\\"]\""); throw std::runtime_error(msg); } // Construct using pay-to-script-hash: CScript inner = createmultisig_redeemScript(pwallet, request.params); CScriptID innerID(inner); UniValue result(UniValue::VOBJ); result.push_back(Pair("address", EncodeDestination(innerID))); result.push_back(Pair("redeemScript", HexStr(inner.begin(), inner.end()))); return result; } static UniValue verifymessage(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 3) { throw std::runtime_error( "verifymessage \"address\" \"signature\" \"message\"\n" "\nVerify a signed message\n" "\nArguments:\n" "1. \"address\" (string, required) The bitcoin address to " "use for the signature.\n" "2. \"signature\" (string, required) The signature provided " "by the signer in base 64 encoding (see signmessage).\n" "3. 
\"message\" (string, required) The message that was " "signed.\n" "\nResult:\n" "true|false (boolean) If the signature is verified or not.\n" "\nExamples:\n" "\nUnlock the wallet for 30 seconds\n" + HelpExampleCli("walletpassphrase", "\"mypassphrase\" 30") + "\nCreate the signature\n" + HelpExampleCli( "signmessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\" \"my message\"") + "\nVerify the signature\n" + HelpExampleCli("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4" "XX\" \"signature\" \"my " "message\"") + "\nAs json rpc\n" + HelpExampleRpc("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4" "XX\", \"signature\", \"my " "message\"")); } LOCK(cs_main); std::string strAddress = request.params[0].get_str(); std::string strSign = request.params[1].get_str(); std::string strMessage = request.params[2].get_str(); CTxDestination destination = DecodeDestination(strAddress); if (!IsValidDestination(destination)) { throw JSONRPCError(RPC_TYPE_ERROR, "Invalid address"); } const CKeyID *keyID = boost::get(&destination); if (!keyID) { throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to key"); } bool fInvalid = false; std::vector vchSig = DecodeBase64(strSign.c_str(), &fInvalid); if (fInvalid) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Malformed base64 encoding"); } CHashWriter ss(SER_GETHASH, 0); ss << strMessageMagic; ss << strMessage; CPubKey pubkey; if (!pubkey.RecoverCompact(ss.GetHash(), vchSig)) { return false; } return (pubkey.GetID() == *keyID); } static UniValue signmessagewithprivkey(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 2) { throw std::runtime_error( "signmessagewithprivkey \"privkey\" \"message\"\n" "\nSign a message with the private key of an address\n" "\nArguments:\n" "1. \"privkey\" (string, required) The private key to sign " "the message with.\n" "2. \"message\" (string, required) The message to create a " "signature of.\n" "\nResult:\n" "\"signature\" (string) The signature of the message " "encoded in base 64\n" "\nExamples:\n" "\nCreate the signature\n" + HelpExampleCli("signmessagewithprivkey", "\"privkey\" \"my message\"") + "\nVerify the signature\n" + HelpExampleCli("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4" "XX\" \"signature\" \"my " "message\"") + "\nAs json rpc\n" + HelpExampleRpc("signmessagewithprivkey", "\"privkey\", \"my message\"")); } std::string strPrivkey = request.params[0].get_str(); std::string strMessage = request.params[1].get_str(); CBitcoinSecret vchSecret; bool fGood = vchSecret.SetString(strPrivkey); if (!fGood) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key"); } CKey key = vchSecret.GetKey(); if (!key.IsValid()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Private key outside allowed range"); } CHashWriter ss(SER_GETHASH, 0); ss << strMessageMagic; ss << strMessage; std::vector vchSig; if (!key.SignCompact(ss.GetHash(), vchSig)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Sign failed"); } return EncodeBase64(&vchSig[0], vchSig.size()); } static UniValue setmocktime(const Config &config, const JSONRPCRequest &request) { if (request.fHelp || request.params.size() != 1) { throw std::runtime_error( "setmocktime timestamp\n" "\nSet the local time to given timestamp (-regtest only)\n" "\nArguments:\n" "1. 
timestamp (integer, required) Unix seconds-since-epoch " "timestamp\n" " Pass 0 to go back to using the system time."); } - if (!Params().MineBlocksOnDemand()) { + if (!config.GetChainParams().MineBlocksOnDemand()) { throw std::runtime_error( "setmocktime for regression testing (-regtest mode) only"); } // For now, don't change mocktime if we're in the middle of validation, as // this could have an effect on mempool time-based eviction, as well as // IsCurrentForFeeEstimation() and IsInitialBlockDownload(). // TODO: figure out the right way to synchronize around mocktime, and // ensure all callsites of GetTime() are accessing this safely. LOCK(cs_main); RPCTypeCheck(request.params, {UniValue::VNUM}); SetMockTime(request.params[0].get_int64()); return NullUniValue; } static UniValue RPCLockedMemoryInfo() { LockedPool::Stats stats = LockedPoolManager::Instance().stats(); UniValue obj(UniValue::VOBJ); obj.push_back(Pair("used", uint64_t(stats.used))); obj.push_back(Pair("free", uint64_t(stats.free))); obj.push_back(Pair("total", uint64_t(stats.total))); obj.push_back(Pair("locked", uint64_t(stats.locked))); obj.push_back(Pair("chunks_used", uint64_t(stats.chunks_used))); obj.push_back(Pair("chunks_free", uint64_t(stats.chunks_free))); return obj; } static UniValue getmemoryinfo(const Config &config, const JSONRPCRequest &request) { /* Please, avoid using the word "pool" here in the RPC interface or help, * as users will undoubtedly confuse it with the other "memory pool" */ if (request.fHelp || request.params.size() != 0) { throw std::runtime_error( "getmemoryinfo\n" "Returns an object containing information about memory usage.\n" "\nResult:\n" "{\n" " \"locked\": { (json object) Information about " "locked memory manager\n" " \"used\": xxxxx, (numeric) Number of bytes used\n" " \"free\": xxxxx, (numeric) Number of bytes available " "in current arenas\n" " \"total\": xxxxxxx, (numeric) Total number of bytes " "managed\n" " \"locked\": xxxxxx, (numeric) Amount of bytes that " "succeeded locking. If this number is smaller than total, locking " "pages failed at some point and key data could be swapped to " "disk.\n" " \"chunks_used\": xxxxx, (numeric) Number allocated chunks\n" " \"chunks_free\": xxxxx, (numeric) Number unused chunks\n" " }\n" "}\n" "\nExamples:\n" + HelpExampleCli("getmemoryinfo", "") + HelpExampleRpc("getmemoryinfo", "")); } UniValue obj(UniValue::VOBJ); obj.push_back(Pair("locked", RPCLockedMemoryInfo())); return obj; } static UniValue echo(const Config &config, const JSONRPCRequest &request) { if (request.fHelp) { throw std::runtime_error( "echo|echojson \"message\" ...\n" "\nSimply echo back the input arguments. This command is for " "testing.\n" "\nThe difference between echo and echojson is that echojson has " "argument conversion enabled in the client-side table in" "bitcoin-cli and the GUI. 
There is no server-side difference."); } return request.params; } // clang-format off static const CRPCCommand commands[] = { // category name actor (function) okSafeMode // ------------------- ------------------------ ---------------------- ---------- { "control", "getinfo", getinfo, true, {} }, /* uses wallet if enabled */ { "control", "getmemoryinfo", getmemoryinfo, true, {} }, { "util", "validateaddress", validateaddress, true, {"address"} }, /* uses wallet if enabled */ { "util", "createmultisig", createmultisig, true, {"nrequired","keys"} }, { "util", "verifymessage", verifymessage, true, {"address","signature","message"} }, { "util", "signmessagewithprivkey", signmessagewithprivkey, true, {"privkey","message"} }, /* Not shown in help */ { "hidden", "setmocktime", setmocktime, true, {"timestamp"}}, { "hidden", "echo", echo, true, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}}, { "hidden", "echojson", echo, true, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}}, }; // clang-format on void RegisterMiscRPCCommands(CRPCTable &t) { for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) { t.appendCommand(commands[vcidx].name, &commands[vcidx]); } } diff --git a/src/secp256k1/src/modules/recovery/main_impl.h b/src/secp256k1/src/modules/recovery/main_impl.h old mode 100755 new mode 100644
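
The fee and priority estimators above share a sentinel convention: CTxMemPool::estimateFee and estimateSmartFee return CFeeRate(Amount(0)) when too few transactions and blocks have been observed, and the RPC layer reports that as -1. A minimal sketch of a caller honouring that convention, assuming the same headers as the files above; GetUsableFeePerK is a hypothetical helper, not part of this patch:

    // Sketch only: interpret the "no estimate available" sentinel used by the
    // estimate* handlers above. Returns true only when a usable rate exists.
    static bool GetUsableFeePerK(CTxMemPool &pool, int nBlocks, Amount &feePerK) {
        if (nBlocks < 1) {
            // estimatefee clamps the horizon to at least one block
            nBlocks = 1;
        }
        CFeeRate rate = pool.estimateFee(nBlocks);
        if (rate == CFeeRate(Amount(0))) {
            // reported as -1 over RPC
            return false;
        }
        feePerK = rate.GetFeePerK();
        return true;
    }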
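
Both RegisterMiningRPCCommands and RegisterMiscRPCCommands follow the same dispatch pattern: a static table of CRPCCommand entries appended one by one to the CRPCTable. A sketch of how an additional handler would be wired in; getexampleinfo and its "hidden" category are illustrative only, and the handler reads the chain name through the Config object, mirroring the Params() -> config.GetChainParams() substitutions made elsewhere in this diff:

    // Sketch only: a hypothetical handler using the (Config, JSONRPCRequest)
    // signature shared by every RPC in this patch.
    static UniValue getexampleinfo(const Config &config,
                                   const JSONRPCRequest &request) {
        if (request.fHelp || request.params.size() != 0) {
            throw std::runtime_error("getexampleinfo\n"
                                     "\nReturns a trivial example object.\n");
        }
        UniValue obj(UniValue::VOBJ);
        // Read chain parameters from the Config object, not the global Params().
        obj.push_back(Pair("chain", config.GetChainParams().NetworkIDString()));
        return obj;
    }

    // clang-format off
    static const CRPCCommand exampleCommands[] = {
        //  category  name              actor (function)  okSafeMode
        //  --------- ----------------- ----------------- ----------
        {   "hidden", "getexampleinfo", getexampleinfo,   true, {} },
    };
    // clang-format on

    void RegisterExampleRPCCommands(CRPCTable &t) {
        for (unsigned int vcidx = 0; vcidx < ARRAYLEN(exampleCommands); vcidx++) {
            t.appendCommand(exampleCommands[vcidx].name, &exampleCommands[vcidx]);
        }
    }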
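
createmultisig builds a bare CHECKMULTISIG redeemScript and then wraps it in P2SH by hashing the script into a CScriptID. A condensed sketch of that flow, assuming the public keys have already been collected and validated as in createmultisig_redeemScript above:

    // Sketch only: 2-of-3 P2SH construction as performed by createmultisig.
    std::vector<CPubKey> pubkeys;  // assumed: filled with three fully valid keys
    CScript inner = GetScriptForMultisig(2, pubkeys);   // OP_2 <k1> <k2> <k3> OP_3 OP_CHECKMULTISIG
    if (inner.size() > MAX_SCRIPT_ELEMENT_SIZE) {
        // the RPC refuses to hand out oversized redeemScripts
    }
    CScriptID innerID(inner);                           // HASH160 of the redeemScript
    std::string address = EncodeDestination(innerID);   // P2SH address
    std::string redeemHex = HexStr(inner.begin(), inner.end());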
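
verifymessage never checks a supplied public key directly; it recovers the key from the 65-byte compact signature and compares its hash160 with the address. A sketch of the sign-and-recover round trip implied by signmessagewithprivkey and verifymessage, using the same CHashWriter/strMessageMagic construction as above, with error handling omitted and a valid CKey named key assumed to exist:

    // Sketch only: compact-signature round trip used by the message RPCs.
    CHashWriter ss(SER_GETHASH, 0);
    ss << strMessageMagic;  // fixed prefix keeps signed messages distinct from transaction data
    ss << strMessage;
    uint256 hash = ss.GetHash();

    std::vector<uint8_t> vchSig;
    if (!key.SignCompact(hash, vchSig)) {
        // signing failed
    }

    CPubKey recovered;
    if (recovered.RecoverCompact(hash, vchSig)) {
        // verifymessage succeeds when the recovered key hashes to the
        // CKeyID decoded from the caller's address:
        bool ok = (recovered.GetID() == key.GetPubKey().GetID());
    }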