diff --git a/src/logging.cpp b/src/logging.cpp index 1315fe57b..7abf25d85 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -1,352 +1,352 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include bool fLogIPs = DEFAULT_LOGIPS; const char *const DEFAULT_DEBUGLOGFILE = "debug.log"; BCLog::Logger &LogInstance() { /** * NOTE: the logger instance is leaked on exit. This is ugly, but will be * cleaned up by the OS/libc. Defining a logger as a global object doesn't * work since the order of destruction of static/global objects is * undefined. Consider if the logger gets destroyed, and then some later * destructor calls LogPrintf, maybe indirectly, and you get a core dump at * shutdown trying to access the logger. When the shutdown sequence is fully * audited and tested, explicit destruction of these objects can be * implemented by changing this from a raw pointer to a std::unique_ptr. * Since the ~Logger() destructor is never called, the Logger class and all * its subclasses must have implicitly-defined destructors. * * This method of initialization was originally introduced in * ee3374234c60aba2cc4c5cd5cac1c0aefc2d817c. */ static BCLog::Logger *g_logger{new BCLog::Logger()}; return *g_logger; } static int FileWriteStr(const std::string &str, FILE *fp) { return fwrite(str.data(), 1, str.size(), fp); } bool BCLog::Logger::StartLogging() { - std::lock_guard scoped_lock(m_cs); + LockGuard scoped_lock(m_cs); assert(m_buffering); assert(m_fileout == nullptr); if (m_print_to_file) { assert(!m_file_path.empty()); m_fileout = fsbridge::fopen(m_file_path, "a"); if (!m_fileout) { return false; } // Unbuffered. setbuf(m_fileout, nullptr); // Add newlines to the logfile to distinguish this execution from the // last one. FileWriteStr("\n\n\n\n\n", m_fileout); } // Dump buffered messages from before we opened the log. 
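Note: the NOTE block above explains why the logger is an intentionally leaked heap object rather than a plain global or function-local static. A minimal, self-contained sketch of that leak-on-purpose singleton pattern (illustrative names, not the project's code):

    #include <cstdio>

    class Sketch {
    public:
        void Hello() { std::puts("hello"); }
    };

    // Intentionally leaked: constructed on first use, never destroyed, so the
    // object can still be used from other destructors at shutdown without
    // running into static-destruction-order problems.
    Sketch &SketchInstance() {
        static Sketch *g_instance{new Sketch()};
        return *g_instance;
    }

    int main() {
        SketchInstance().Hello();
        return 0;
    }

The trade-off is exactly the one the NOTE describes: the OS reclaims the memory at exit, and the type must not rely on its destructor running.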
m_buffering = false; while (!m_msgs_before_open.empty()) { const std::string &s = m_msgs_before_open.front(); if (m_print_to_file) { FileWriteStr(s, m_fileout); } if (m_print_to_console) { fwrite(s.data(), 1, s.size(), stdout); } for (const auto &cb : m_print_callbacks) { cb(s); } m_msgs_before_open.pop_front(); } if (m_print_to_console) { fflush(stdout); } return true; } void BCLog::Logger::DisconnectTestLogger() { - std::lock_guard scoped_lock(m_cs); + LockGuard scoped_lock(m_cs); m_buffering = true; if (m_fileout != nullptr) { fclose(m_fileout); } m_fileout = nullptr; m_print_callbacks.clear(); } struct CLogCategoryDesc { BCLog::LogFlags flag; std::string category; }; const CLogCategoryDesc LogCategories[] = { {BCLog::NONE, "0"}, {BCLog::NONE, "none"}, {BCLog::NET, "net"}, {BCLog::TOR, "tor"}, {BCLog::MEMPOOL, "mempool"}, {BCLog::HTTP, "http"}, {BCLog::BENCH, "bench"}, {BCLog::ZMQ, "zmq"}, {BCLog::DB, "db"}, {BCLog::RPC, "rpc"}, {BCLog::ESTIMATEFEE, "estimatefee"}, {BCLog::ADDRMAN, "addrman"}, {BCLog::SELECTCOINS, "selectcoins"}, {BCLog::REINDEX, "reindex"}, {BCLog::CMPCTBLOCK, "cmpctblock"}, {BCLog::RAND, "rand"}, {BCLog::PRUNE, "prune"}, {BCLog::PROXY, "proxy"}, {BCLog::MEMPOOLREJ, "mempoolrej"}, {BCLog::LIBEVENT, "libevent"}, {BCLog::COINDB, "coindb"}, {BCLog::QT, "qt"}, {BCLog::LEVELDB, "leveldb"}, {BCLog::VALIDATION, "validation"}, {BCLog::ALL, "1"}, {BCLog::ALL, "all"}, }; bool GetLogCategory(BCLog::LogFlags &flag, const std::string &str) { if (str == "") { flag = BCLog::ALL; return true; } for (const CLogCategoryDesc &category_desc : LogCategories) { if (category_desc.category == str) { flag = category_desc.flag; return true; } } return false; } std::string ListLogCategories() { std::string ret; int outcount = 0; for (const CLogCategoryDesc &category_desc : LogCategories) { // Omit the special cases. if (category_desc.flag != BCLog::NONE && category_desc.flag != BCLog::ALL) { if (outcount != 0) { ret += ", "; } ret += category_desc.category; outcount++; } } return ret; } std::vector ListActiveLogCategories() { std::vector ret; for (const CLogCategoryDesc &category_desc : LogCategories) { // Omit the special cases. if (category_desc.flag != BCLog::NONE && category_desc.flag != BCLog::ALL) { CLogCategoryActive catActive; catActive.category = category_desc.category; catActive.active = LogAcceptCategory(category_desc.flag); ret.push_back(catActive); } } return ret; } BCLog::Logger::~Logger() { if (m_fileout) { fclose(m_fileout); } } std::string BCLog::Logger::LogTimestampStr(const std::string &str) { std::string strStamped; if (!m_log_timestamps) { return str; } if (m_started_new_line) { int64_t nTimeMicros = GetTimeMicros(); strStamped = FormatISO8601DateTime(nTimeMicros / 1000000); if (m_log_time_micros) { strStamped.pop_back(); strStamped += strprintf(".%06dZ", nTimeMicros % 1000000); } int64_t mocktime = GetMockTime(); if (mocktime) { strStamped += " (mocktime: " + FormatISO8601DateTime(mocktime) + ")"; } strStamped += ' ' + str; } else { strStamped = str; } return strStamped; } namespace BCLog { /** Belts and suspenders: make sure outgoing log messages don't contain * potentially suspicious characters, such as terminal control codes. * * This escapes control characters except newline ('\n') in C syntax. * It escapes instead of removes them to still allow for troubleshooting * issues where they accidentally end up in strings. 
*/ std::string LogEscapeMessage(const std::string &str) { std::string ret; for (char ch_in : str) { uint8_t ch = (uint8_t)ch_in; if ((ch >= 32 || ch == '\n') && ch != '\x7f') { ret += ch_in; } else { ret += strprintf("\\x%02x", ch); } } return ret; } } // namespace BCLog void BCLog::Logger::LogPrintStr(const std::string &str) { - std::lock_guard scoped_lock(m_cs); + LockGuard scoped_lock(m_cs); std::string str_prefixed = LogEscapeMessage(str); if (m_log_threadnames && m_started_new_line) { str_prefixed.insert(0, "[" + util::ThreadGetInternalName() + "] "); } str_prefixed = LogTimestampStr(str_prefixed); m_started_new_line = !str.empty() && str[str.size() - 1] == '\n'; if (m_buffering) { // buffer if we haven't started logging yet m_msgs_before_open.push_back(str_prefixed); return; } if (m_print_to_console) { // Print to console. fwrite(str_prefixed.data(), 1, str_prefixed.size(), stdout); fflush(stdout); } for (const auto &cb : m_print_callbacks) { cb(str_prefixed); } if (m_print_to_file) { assert(m_fileout != nullptr); // Reopen the log file, if requested. if (m_reopen_file) { m_reopen_file = false; FILE *new_fileout = fsbridge::fopen(m_file_path, "a"); if (new_fileout) { // unbuffered. setbuf(m_fileout, nullptr); fclose(m_fileout); m_fileout = new_fileout; } } FileWriteStr(str_prefixed, m_fileout); } } void BCLog::Logger::ShrinkDebugFile() { // Amount of debug.log to save at end when shrinking (must fit in memory) constexpr size_t RECENT_DEBUG_HISTORY_SIZE = 10 * 1000000; assert(!m_file_path.empty()); // Scroll debug.log if it's getting too big. FILE *file = fsbridge::fopen(m_file_path, "r"); // Special files (e.g. device nodes) may not have a size. size_t log_size = 0; try { log_size = fs::file_size(m_file_path); } catch (const fs::filesystem_error &) { } // If debug.log file is more than 10% bigger the RECENT_DEBUG_HISTORY_SIZE // trim it down by saving only the last RECENT_DEBUG_HISTORY_SIZE bytes. if (file && log_size > 11 * (RECENT_DEBUG_HISTORY_SIZE / 10)) { // Restart the file with some of the end. std::vector vch(RECENT_DEBUG_HISTORY_SIZE, 0); if (fseek(file, -((long)vch.size()), SEEK_END)) { LogPrintf("Failed to shrink debug log file: fseek(...) failed\n"); fclose(file); return; } int nBytes = fread(vch.data(), 1, vch.size(), file); fclose(file); file = fsbridge::fopen(m_file_path, "w"); if (file) { fwrite(vch.data(), 1, nBytes, file); fclose(file); } } else if (file != nullptr) { fclose(file); } } void BCLog::Logger::EnableCategory(LogFlags category) { m_categories |= category; } bool BCLog::Logger::EnableCategory(const std::string &str) { BCLog::LogFlags flag; if (!GetLogCategory(flag, str)) { return false; } EnableCategory(flag); return true; } void BCLog::Logger::DisableCategory(LogFlags category) { m_categories &= ~category; } bool BCLog::Logger::DisableCategory(const std::string &str) { BCLog::LogFlags flag; if (!GetLogCategory(flag, str)) { return false; } DisableCategory(flag); return true; } bool BCLog::Logger::WillLogCategory(LogFlags category) const { // ALL is not meant to be used as a logging category, but only as a mask // representing all categories. 
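Note: ShrinkDebugFile above keeps only the last RECENT_DEBUG_HISTORY_SIZE bytes once the log grows past roughly 110% of that size. A standalone sketch of the same keep-the-tail idea, with error handling reduced to early returns (hypothetical file name, not the project's code):

    #include <cstdio>
    #include <vector>

    // Keep only the last `keep` bytes of `path`.
    static void KeepTail(const char *path, size_t keep) {
        std::FILE *in = std::fopen(path, "rb");
        if (!in) return;
        std::vector<char> buf(keep, 0);
        if (std::fseek(in, -static_cast<long>(buf.size()), SEEK_END) != 0) {
            // File shorter than `keep`: nothing to trim.
            std::fclose(in);
            return;
        }
        size_t n = std::fread(buf.data(), 1, buf.size(), in);
        std::fclose(in);
        // Rewrite the file with just the tail that was read.
        std::FILE *out = std::fopen(path, "wb");
        if (!out) return;
        std::fwrite(buf.data(), 1, n, out);
        std::fclose(out);
    }

    int main() {
        KeepTail("example.log", 10 * 1000000); // hypothetical path
        return 0;
    }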
if (category == BCLog::NONE || category == BCLog::ALL) { LogPrintf("Error trying to log using a category mask instead of an " "explicit category.\n"); return true; } return (m_categories.load(std::memory_order_relaxed) & category) != 0; } bool BCLog::Logger::DefaultShrinkDebugFile() const { return m_categories != BCLog::NONE; } diff --git a/src/logging.h b/src/logging.h index ddaaceaa6..45f7bfb81 100644 --- a/src/logging.h +++ b/src/logging.h @@ -1,204 +1,204 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Copyright (c) 2017-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_LOGGING_H #define BITCOIN_LOGGING_H #include +#include #include #include #include #include #include #include static const bool DEFAULT_LOGTIMEMICROS = false; static const bool DEFAULT_LOGIPS = false; static const bool DEFAULT_LOGTIMESTAMPS = true; static const bool DEFAULT_LOGTHREADNAMES = false; extern bool fLogIPs; extern const char *const DEFAULT_DEBUGLOGFILE; struct CLogCategoryActive { std::string category; bool active; }; namespace BCLog { enum LogFlags : uint32_t { NONE = 0, NET = (1 << 0), TOR = (1 << 1), MEMPOOL = (1 << 2), HTTP = (1 << 3), BENCH = (1 << 4), ZMQ = (1 << 5), DB = (1 << 6), RPC = (1 << 7), ESTIMATEFEE = (1 << 8), ADDRMAN = (1 << 9), SELECTCOINS = (1 << 10), REINDEX = (1 << 11), CMPCTBLOCK = (1 << 12), RAND = (1 << 13), PRUNE = (1 << 14), PROXY = (1 << 15), MEMPOOLREJ = (1 << 16), LIBEVENT = (1 << 17), COINDB = (1 << 18), QT = (1 << 19), LEVELDB = (1 << 20), VALIDATION = (1 << 21), ALL = ~uint32_t(0), }; class Logger { private: // Can not use Mutex from sync.h because in debug mode it would cause a // deadlock when a potential deadlock was detected mutable std::mutex m_cs; - // GUARDED_BY(m_cs) - FILE *m_fileout = nullptr; - // GUARDED_BY(m_cs) - std::list m_msgs_before_open; - //! Buffer messages before logging can be started. GUARDED_BY(m_cs) - bool m_buffering{true}; + + FILE *m_fileout GUARDED_BY(m_cs) = nullptr; + std::list m_msgs_before_open GUARDED_BY(m_cs); + //! Buffer messages before logging can be started. + bool m_buffering GUARDED_BY(m_cs) = true; /** * m_started_new_line is a state variable that will suppress printing of the * timestamp when multiple calls are made that don't end in a newline. */ std::atomic_bool m_started_new_line{true}; /** * Log categories bitfield. 
*/ std::atomic m_categories{0}; std::string LogTimestampStr(const std::string &str); /** Slots that connect to the print signal */ std::list> - m_print_callbacks /* GUARDED_BY(m_cs) */ {}; + m_print_callbacks GUARDED_BY(m_cs){}; public: bool m_print_to_console = false; bool m_print_to_file = false; bool m_log_timestamps = DEFAULT_LOGTIMESTAMPS; bool m_log_time_micros = DEFAULT_LOGTIMEMICROS; bool m_log_threadnames = DEFAULT_LOGTHREADNAMES; fs::path m_file_path; std::atomic m_reopen_file{false}; ~Logger(); /** Send a string to the log output */ void LogPrintStr(const std::string &str); /** Returns whether logs will be written to any output */ bool Enabled() const { - std::lock_guard scoped_lock(m_cs); + LockGuard scoped_lock(m_cs); return m_buffering || m_print_to_console || m_print_to_file || !m_print_callbacks.empty(); } /** Connect a slot to the print signal and return the connection */ std::list>::iterator PushBackCallback(std::function fun) { - std::lock_guard scoped_lock(m_cs); + LockGuard scoped_lock(m_cs); m_print_callbacks.push_back(std::move(fun)); return --m_print_callbacks.end(); } /** Delete a connection */ void DeleteCallback( std::list>::iterator it) { - std::lock_guard scoped_lock(m_cs); + LockGuard scoped_lock(m_cs); m_print_callbacks.erase(it); } /** Start logging (and flush all buffered messages) */ bool StartLogging(); /** Only for testing */ void DisconnectTestLogger(); void ShrinkDebugFile(); uint32_t GetCategoryMask() const { return m_categories.load(); } void EnableCategory(LogFlags category); bool EnableCategory(const std::string &str); void DisableCategory(LogFlags category); bool DisableCategory(const std::string &str); /** Return true if log accepts specified category */ bool WillLogCategory(LogFlags category) const; /** Default for whether ShrinkDebugFile should be run */ bool DefaultShrinkDebugFile() const; }; } // namespace BCLog BCLog::Logger &LogInstance(); /** Return true if log accepts specified category */ static inline bool LogAcceptCategory(BCLog::LogFlags category) { return LogInstance().WillLogCategory(category); } /** Returns a string with the log categories. */ std::string ListLogCategories(); /** Returns a vector of the active log categories. */ std::vector ListActiveLogCategories(); /** Return true if str parses as a log category and set the flag */ bool GetLogCategory(BCLog::LogFlags &flag, const std::string &str); // Be conservative when using LogPrintf/error or other things which // unconditionally log to debug.log! It should not be the case that an inbound // peer can fill up a user's disk with debug.log entries. template static inline void LogPrintf(const char *fmt, const Args &... args) { if (LogInstance().Enabled()) { std::string log_msg; try { log_msg = tfm::format(fmt, args...); } catch (tinyformat::format_error &fmterr) { /** * Original format string will have newline so don't add one here */ log_msg = "Error \"" + std::string(fmterr.what()) + "\" while formatting log message: " + fmt; } LogInstance().LogPrintStr(log_msg); } } // Use a macro instead of a function for conditional logging to prevent // evaluating arguments when logging for the category is not enabled. #define LogPrint(category, ...) \ do { \ if (LogAcceptCategory((category))) { \ LogPrintf(__VA_ARGS__); \ } \ } while (0) /** * These are aliases used to explicitly state that the message should not end * with a newline character. It allows for detecting the missing newlines that * could make the logs hard to read. 
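Note: the substantive change in logging.h above is moving GUARDED_BY() out of comments and onto the members themselves, which lets clang's -Wthread-safety verify that m_cs is actually held, and is presumably why the std::lock_guard uses become LockGuard (an annotation-aware guard). A reduced, self-contained sketch of how such annotations fit together; the macros here are simplified stand-ins for the project's threadsafety.h and every name is illustrative:

    #include <mutex>

    #if defined(__clang__)
    #define TSA(x) __attribute__((x))
    #else
    #define TSA(x)
    #endif
    #define CAPABILITY(x) TSA(capability(x))
    #define GUARDED_BY(x) TSA(guarded_by(x))
    #define ACQUIRE(...) TSA(acquire_capability(__VA_ARGS__))
    #define RELEASE(...) TSA(release_capability(__VA_ARGS__))
    #define SCOPED_CAPABILITY TSA(scoped_lockable)

    // A mutex type the analysis knows about.
    class CAPABILITY("mutex") AnnotatedMutex {
    public:
        void lock() ACQUIRE() { m.lock(); }
        void unlock() RELEASE() { m.unlock(); }

    private:
        std::mutex m;
    };

    // RAII guard the analysis knows about (the role LockGuard plays above).
    class SCOPED_CAPABILITY Guard {
    public:
        explicit Guard(AnnotatedMutex &mu) ACQUIRE(mu) : m(mu) { m.lock(); }
        ~Guard() RELEASE() { m.unlock(); }

    private:
        AnnotatedMutex &m;
    };

    class Counter {
    public:
        void Increment() {
            Guard g(m_cs);
            ++m_value; // OK: m_cs is held here.
            // Touching m_value outside a Guard would be reported when the
            // code is built with clang and -Wthread-safety.
        }

    private:
        AnnotatedMutex m_cs;
        int m_value GUARDED_BY(m_cs) = 0;
    };

    int main() {
        Counter c;
        c.Increment();
        return 0;
    }

The warnings only appear when both the mutex type and the RAII guard carry these attributes, which matches the pattern of pairing GUARDED_BY members with an annotated lock type.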
*/ #define LogPrintfToBeContinued LogPrintf #define LogPrintToBeContinued LogPrint #endif // BITCOIN_LOGGING_H diff --git a/src/net.cpp b/src/net.cpp index a2717aa47..2be34bc41 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1,3049 +1,3050 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #if defined(HAVE_CONFIG_H) #include #endif #include #include #include #include #include #include #include #include #include #include #include #ifdef WIN32 #include #else #include #endif #ifdef USE_POLL #include #endif #ifdef USE_UPNP #include #include #include #endif #include #include // How often to dump addresses to peers.dat static constexpr std::chrono::minutes DUMP_PEERS_INTERVAL{15}; // We add a random period time (0 to 1 seconds) to feeler connections to prevent // synchronization. #define FEELER_SLEEP_WINDOW 1 // MSG_NOSIGNAL is not available on some platforms, if it doesn't exist define // it as 0 #if !defined(MSG_NOSIGNAL) #define MSG_NOSIGNAL 0 #endif // MSG_DONTWAIT is not available on some platforms, if it doesn't exist define // it as 0 #if !defined(MSG_DONTWAIT) #define MSG_DONTWAIT 0 #endif // Fix for ancient MinGW versions, that don't have defined these in ws2tcpip.h. // Todo: Can be removed when our pull-tester is upgraded to a modern MinGW // version. #ifdef WIN32 #ifndef PROTECTION_LEVEL_UNRESTRICTED #define PROTECTION_LEVEL_UNRESTRICTED 10 #endif #ifndef IPV6_PROTECTION_LEVEL #define IPV6_PROTECTION_LEVEL 23 #endif #endif /** Used to pass flags to the Bind() function */ enum BindFlags { BF_NONE = 0, BF_EXPLICIT = (1U << 0), BF_REPORT_ERROR = (1U << 1), }; // The set of sockets cannot be modified while waiting // The sleep time needs to be small to avoid new sockets stalling static const uint64_t SELECT_TIMEOUT_MILLISECONDS = 50; const std::string NET_MESSAGE_COMMAND_OTHER = "*other*"; // SHA256("netgroup")[0:8] static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("localhostnonce")[0:8] static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // // Global state variables // bool fDiscover = true; bool fListen = true; bool g_relay_txes = !DEFAULT_BLOCKSONLY; RecursiveMutex cs_mapLocalHost; std::map mapLocalHost GUARDED_BY(cs_mapLocalHost); static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {}; void CConnman::AddOneShot(const std::string &strDest) { LOCK(cs_vOneShots); vOneShots.push_back(strDest); } unsigned short GetListenPort() { return (unsigned short)(gArgs.GetArg("-port", Params().GetDefaultPort())); } // find 'best' local address for a particular peer bool GetLocal(CService &addr, const CNetAddr *paddrPeer) { if (!fListen) { return false; } int nBestScore = -1; int nBestReachability = -1; { LOCK(cs_mapLocalHost); for (const auto &entry : mapLocalHost) { int nScore = entry.second.nScore; int nReachability = entry.first.GetReachabilityFrom(paddrPeer); if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore)) { addr = CService(entry.first, entry.second.nPort); nBestReachability = nReachability; nBestScore = nScore; } } } return nBestScore >= 0; } //! Convert the pnSeed6 array into usable address objects. static std::vector convertSeed6(const std::vector &vSeedsIn) { // It'll only connect to one or two seed nodes because once it connects, // it'll get a pile of addresses with newer timestamps. 
Seed nodes are given // a random 'last seen time' of between one and two weeks ago. const int64_t nOneWeek = 7 * 24 * 60 * 60; std::vector vSeedsOut; vSeedsOut.reserve(vSeedsIn.size()); FastRandomContext rng; for (const auto &seed_in : vSeedsIn) { struct in6_addr ip; memcpy(&ip, seed_in.addr, sizeof(ip)); CAddress addr(CService(ip, seed_in.port), GetDesirableServiceFlags(NODE_NONE)); addr.nTime = GetTime() - rng.randrange(nOneWeek) - nOneWeek; vSeedsOut.push_back(addr); } return vSeedsOut; } // Get best local address for a particular peer as a CAddress. Otherwise, return // the unroutable 0.0.0.0 but filled in with the normal parameters, since the IP // may be changed to a useful one by discovery. CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices) { CAddress ret(CService(CNetAddr(), GetListenPort()), nLocalServices); CService addr; if (GetLocal(addr, paddrPeer)) { ret = CAddress(addr, nLocalServices); } ret.nTime = GetAdjustedTime(); return ret; } static int GetnScore(const CService &addr) { LOCK(cs_mapLocalHost); if (mapLocalHost.count(addr) == 0) { return 0; } return mapLocalHost[addr].nScore; } // Is our peer's addrLocal potentially useful as an external IP source? bool IsPeerAddrLocalGood(CNode *pnode) { CService addrLocal = pnode->GetAddrLocal(); return fDiscover && pnode->addr.IsRoutable() && addrLocal.IsRoutable() && IsReachable(addrLocal.GetNetwork()); } // Pushes our own address to a peer. void AdvertiseLocal(CNode *pnode) { if (fListen && pnode->fSuccessfullyConnected) { CAddress addrLocal = GetLocalAddress(&pnode->addr, pnode->GetLocalServices()); if (gArgs.GetBoolArg("-addrmantest", false)) { // use IPv4 loopback during addrmantest addrLocal = CAddress(CService(LookupNumeric("127.0.0.1", GetListenPort())), pnode->GetLocalServices()); } // If discovery is enabled, sometimes give our peer the address it // tells us that it sees us as in case it has a better idea of our // address than we do. FastRandomContext rng; if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() || rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0)) { addrLocal.SetIP(pnode->GetAddrLocal()); } if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false)) { LogPrint(BCLog::NET, "AdvertiseLocal: advertising address %s\n", addrLocal.ToString()); pnode->PushAddress(addrLocal, rng); } } } // Learn a new local address. bool AddLocal(const CService &addr, int nScore) { if (!addr.IsRoutable()) { return false; } if (!fDiscover && nScore < LOCAL_MANUAL) { return false; } if (!IsReachable(addr)) { return false; } LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore); { LOCK(cs_mapLocalHost); bool fAlready = mapLocalHost.count(addr) > 0; LocalServiceInfo &info = mapLocalHost[addr]; if (!fAlready || nScore >= info.nScore) { info.nScore = nScore + (fAlready ? 
1 : 0); info.nPort = addr.GetPort(); } } return true; } bool AddLocal(const CNetAddr &addr, int nScore) { return AddLocal(CService(addr, GetListenPort()), nScore); } void RemoveLocal(const CService &addr) { LOCK(cs_mapLocalHost); LogPrintf("RemoveLocal(%s)\n", addr.ToString()); mapLocalHost.erase(addr); } void SetReachable(enum Network net, bool reachable) { if (net == NET_UNROUTABLE || net == NET_INTERNAL) { return; } LOCK(cs_mapLocalHost); vfLimited[net] = !reachable; } bool IsReachable(enum Network net) { LOCK(cs_mapLocalHost); return !vfLimited[net]; } bool IsReachable(const CNetAddr &addr) { return IsReachable(addr.GetNetwork()); } /** vote for a local address */ bool SeenLocal(const CService &addr) { LOCK(cs_mapLocalHost); if (mapLocalHost.count(addr) == 0) { return false; } mapLocalHost[addr].nScore++; return true; } /** check whether a given address is potentially local */ bool IsLocal(const CService &addr) { LOCK(cs_mapLocalHost); return mapLocalHost.count(addr) > 0; } CNode *CConnman::FindNode(const CNetAddr &ip) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (static_cast(pnode->addr) == ip) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const CSubNet &subNet) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (subNet.Match(static_cast(pnode->addr))) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const std::string &addrName) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (pnode->GetAddrName() == addrName) { return pnode; } } return nullptr; } CNode *CConnman::FindNode(const CService &addr) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (static_cast(pnode->addr) == addr) { return pnode; } } return nullptr; } bool CConnman::CheckIncomingNonce(uint64_t nonce) { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (!pnode->fSuccessfullyConnected && !pnode->fInbound && pnode->GetLocalNonce() == nonce) { return false; } } return true; } /** Get the bind address for a socket as CAddress */ static CAddress GetBindAddress(SOCKET sock) { CAddress addr_bind; struct sockaddr_storage sockaddr_bind; socklen_t sockaddr_bind_len = sizeof(sockaddr_bind); if (sock != INVALID_SOCKET) { if (!getsockname(sock, (struct sockaddr *)&sockaddr_bind, &sockaddr_bind_len)) { addr_bind.SetSockAddr((const struct sockaddr *)&sockaddr_bind); } else { LogPrint(BCLog::NET, "Warning: getsockname failed\n"); } } return addr_bind; } CNode *CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only) { if (pszDest == nullptr) { if (IsLocal(addrConnect)) { return nullptr; } // Look for an existing connection CNode *pnode = FindNode(static_cast(addrConnect)); if (pnode) { LogPrintf("Failed to open new connection, already connected\n"); return nullptr; } } /// debug print LogPrint(BCLog::NET, "trying connection %s lastseen=%.1fhrs\n", pszDest ? pszDest : addrConnect.ToString(), pszDest ? 0.0 : (double)(GetAdjustedTime() - addrConnect.nTime) / 3600.0); // Resolve const int default_port = Params().GetDefaultPort(); if (pszDest) { std::vector resolved; if (Lookup(pszDest, resolved, default_port, fNameLookup && !HaveNameProxy(), 256) && !resolved.empty()) { addrConnect = CAddress(resolved[GetRand(resolved.size())], NODE_NONE); if (!addrConnect.IsValid()) { LogPrint(BCLog::NET, "Resolver returned invalid address %s for %s\n", addrConnect.ToString(), pszDest); return nullptr; } // It is possible that we already have a connection to the IP/port // pszDest resolved to. 
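Note: AddLocal/SeenLocal/GetLocal above implement a small voting scheme over candidate local addresses: each sighting bumps a score and the best-scoring reachable entry wins. A reduced sketch of just the scoring part (reachability and ports omitted; names and data are illustrative):

    #include <iostream>
    #include <map>
    #include <string>

    // Candidate local addresses with a score bumped on each "vote".
    static std::map<std::string, int> g_local_scores;

    static void AddLocalCandidate(const std::string &addr, int initial_score) {
        // Keep the higher of the existing and the proposed score.
        int &score = g_local_scores[addr];
        if (initial_score > score) score = initial_score;
    }

    static void SeenLocalCandidate(const std::string &addr) {
        auto it = g_local_scores.find(addr);
        if (it != g_local_scores.end()) ++it->second;
    }

    // Return true and set `best` if any candidate exists.
    static bool GetBestLocal(std::string &best) {
        int best_score = -1;
        for (const auto &entry : g_local_scores) {
            if (entry.second > best_score) {
                best_score = entry.second;
                best = entry.first;
            }
        }
        return best_score >= 0;
    }

    int main() {
        AddLocalCandidate("203.0.113.5", 1);
        AddLocalCandidate("198.51.100.7", 1);
        SeenLocalCandidate("203.0.113.5"); // peers keep reporting this one
        std::string best;
        if (GetBestLocal(best)) std::cout << best << "\n"; // prints 203.0.113.5
        return 0;
    }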
In that case, drop the connection that was // just created, and return the existing CNode instead. Also store // the name we used to connect in that CNode, so that future // FindNode() calls to that name catch this early. LOCK(cs_vNodes); CNode *pnode = FindNode(static_cast(addrConnect)); if (pnode) { pnode->MaybeSetAddrName(std::string(pszDest)); LogPrintf("Failed to open new connection, already connected\n"); return nullptr; } } } // Connect bool connected = false; SOCKET hSocket = INVALID_SOCKET; proxyType proxy; if (addrConnect.IsValid()) { bool proxyConnectionFailed = false; if (GetProxy(addrConnect.GetNetwork(), proxy)) { hSocket = CreateSocket(proxy.proxy); if (hSocket == INVALID_SOCKET) { return nullptr; } connected = ConnectThroughProxy( proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), hSocket, nConnectTimeout, &proxyConnectionFailed); } else { // no proxy needed (none set for target network) hSocket = CreateSocket(addrConnect); if (hSocket == INVALID_SOCKET) { return nullptr; } connected = ConnectSocketDirectly( addrConnect, hSocket, nConnectTimeout, manual_connection); } if (!proxyConnectionFailed) { // If a connection to the node was attempted, and failure (if any) // is not caused by a problem connecting to the proxy, mark this as // an attempt. addrman.Attempt(addrConnect, fCountFailure); } } else if (pszDest && GetNameProxy(proxy)) { hSocket = CreateSocket(proxy.proxy); if (hSocket == INVALID_SOCKET) { return nullptr; } std::string host; int port = default_port; SplitHostPort(std::string(pszDest), port, host); connected = ConnectThroughProxy(proxy, host, port, hSocket, nConnectTimeout, nullptr); } if (!connected) { CloseSocket(hSocket); return nullptr; } // Add node NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE) .Write(id) .Finalize(); CAddress addr_bind = GetBindAddress(hSocket); CNode *pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", false, block_relay_only); pnode->AddRef(); return pnode; } void CNode::CloseSocketDisconnect() { fDisconnect = true; LOCK(cs_hSocket); if (hSocket != INVALID_SOCKET) { LogPrint(BCLog::NET, "disconnecting peer=%d\n", id); CloseSocket(hSocket); } } void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags &flags, const CNetAddr &addr) const { for (const auto &subnet : vWhitelistedRange) { if (subnet.m_subnet.Match(addr)) { NetPermissions::AddFlag(flags, subnet.m_flags); } } } std::string CNode::GetAddrName() const { LOCK(cs_addrName); return addrName; } void CNode::MaybeSetAddrName(const std::string &addrNameIn) { LOCK(cs_addrName); if (addrName.empty()) { addrName = addrNameIn; } } CService CNode::GetAddrLocal() const { LOCK(cs_addrLocal); return addrLocal; } void CNode::SetAddrLocal(const CService &addrLocalIn) { LOCK(cs_addrLocal); if (addrLocal.IsValid()) { error("Addr local already set for node: %i. 
Refusing to change from %s " "to %s", id, addrLocal.ToString(), addrLocalIn.ToString()); } else { addrLocal = addrLocalIn; } } void CNode::copyStats(CNodeStats &stats) { stats.nodeid = this->GetId(); stats.nServices = nServices; stats.addr = addr; stats.addrBind = addrBind; if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_filter); stats.fRelayTxes = m_tx_relay->fRelayTxes; } else { stats.fRelayTxes = false; } stats.nLastSend = nLastSend; stats.nLastRecv = nLastRecv; stats.nTimeConnected = nTimeConnected; stats.nTimeOffset = nTimeOffset; stats.addrName = GetAddrName(); stats.nVersion = nVersion; { LOCK(cs_SubVer); stats.cleanSubVer = cleanSubVer; } stats.fInbound = fInbound; stats.m_manual_connection = m_manual_connection; stats.nStartingHeight = nStartingHeight; { LOCK(cs_vSend); stats.mapSendBytesPerMsgCmd = mapSendBytesPerMsgCmd; stats.nSendBytes = nSendBytes; } { LOCK(cs_vRecv); stats.mapRecvBytesPerMsgCmd = mapRecvBytesPerMsgCmd; stats.nRecvBytes = nRecvBytes; } stats.m_legacyWhitelisted = m_legacyWhitelisted; stats.m_permissionFlags = m_permissionFlags; if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_feeFilter); stats.minFeeFilter = m_tx_relay->minFeeFilter; } else { stats.minFeeFilter = Amount::zero(); } // It is common for nodes with good ping times to suddenly become lagged, // due to a new block arriving or other large transfer. Merely reporting // pingtime might fool the caller into thinking the node was still // responsive, since pingtime does not update until the ping is complete, // which might take a while. So, if a ping is taking an unusually long time // in flight, the caller can immediately detect that this is happening. int64_t nPingUsecWait = 0; if ((0 != nPingNonceSent) && (0 != nPingUsecStart)) { nPingUsecWait = GetTimeMicros() - nPingUsecStart; } // Raw ping time is in microseconds, but show it to user as whole seconds // (Bitcoin users should be well used to small numbers with many decimal // places by now :) stats.m_ping_usec = nPingUsecTime; stats.m_min_ping_usec = nMinPingUsecTime; stats.m_ping_wait_usec = nPingUsecWait; // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); stats.addrLocal = addrLocalUnlocked.IsValid() ? addrLocalUnlocked.ToString() : ""; } static bool IsOversizedMessage(const Config &config, const CNetMessage &msg) { if (!msg.in_data) { // Header only, cannot be oversized. return false; } return msg.hdr.IsOversized(config); } bool CNode::ReceiveMsgBytes(const Config &config, const char *pch, uint32_t nBytes, bool &complete) { complete = false; int64_t nTimeMicros = GetTimeMicros(); LOCK(cs_vRecv); nLastRecv = nTimeMicros / 1000000; nRecvBytes += nBytes; while (nBytes > 0) { // Get current incomplete message, or create a new one. if (vRecvMsg.empty() || vRecvMsg.back().complete()) { vRecvMsg.push_back(CNetMessage(config.GetChainParams().NetMagic(), SER_NETWORK, INIT_PROTO_VERSION)); } CNetMessage &msg = vRecvMsg.back(); // Absorb network data. int handled; if (!msg.in_data) { handled = msg.readHeader(config, pch, nBytes); } else { handled = msg.readData(pch, nBytes); } if (handled < 0) { return false; } if (IsOversizedMessage(config, msg)) { LogPrint(BCLog::NET, "Oversized message from peer=%i, disconnecting\n", GetId()); return false; } pch += handled; nBytes -= handled; if (msg.complete()) { // Store received bytes per message command to prevent a memory DOS, // only allow valid commands. 
mapMsgCmdSize::iterator i = mapRecvBytesPerMsgCmd.find(msg.hdr.pchCommand.data()); if (i == mapRecvBytesPerMsgCmd.end()) { i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER); } assert(i != mapRecvBytesPerMsgCmd.end()); i->second += msg.hdr.nMessageSize + CMessageHeader::HEADER_SIZE; msg.nTime = nTimeMicros; complete = true; } } return true; } void CNode::SetSendVersion(int nVersionIn) { // Send version may only be changed in the version message, and only one // version message is allowed per session. We can therefore treat this value // as const and even atomic as long as it's only used once a version message // has been successfully processed. Any attempt to set this twice is an // error. if (nSendVersion != 0) { error("Send version already set for node: %i. Refusing to change from " "%i to %i", id, nSendVersion, nVersionIn); } else { nSendVersion = nVersionIn; } } int CNode::GetSendVersion() const { // The send version should always be explicitly set to INIT_PROTO_VERSION // rather than using this value until SetSendVersion has been called. if (nSendVersion == 0) { error("Requesting unset send version for node: %i. Using %i", id, INIT_PROTO_VERSION); return INIT_PROTO_VERSION; } return nSendVersion; } int CNetMessage::readHeader(const Config &config, const char *pch, uint32_t nBytes) { // copy data to temporary parsing buffer uint32_t nRemaining = 24 - nHdrPos; uint32_t nCopy = std::min(nRemaining, nBytes); memcpy(&hdrbuf[nHdrPos], pch, nCopy); nHdrPos += nCopy; // if header incomplete, exit if (nHdrPos < 24) { return nCopy; } // deserialize to CMessageHeader try { hdrbuf >> hdr; } catch (const std::exception &) { return -1; } // Reject oversized messages if (hdr.IsOversized(config)) { LogPrint(BCLog::NET, "Oversized header detected\n"); return -1; } // switch state to reading message data in_data = true; return nCopy; } int CNetMessage::readData(const char *pch, uint32_t nBytes) { unsigned int nRemaining = hdr.nMessageSize - nDataPos; unsigned int nCopy = std::min(nRemaining, nBytes); if (vRecv.size() < nDataPos + nCopy) { // Allocate up to 256 KiB ahead, but never more than the total message // size. 
vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); } hasher.Write((const uint8_t *)pch, nCopy); memcpy(&vRecv[nDataPos], pch, nCopy); nDataPos += nCopy; return nCopy; } const uint256 &CNetMessage::GetMessageHash() const { assert(complete()); if (data_hash.IsNull()) { hasher.Finalize(data_hash.begin()); } return data_hash; } size_t CConnman::SocketSendData(CNode *pnode) const EXCLUSIVE_LOCKS_REQUIRED(pnode->cs_vSend) { size_t nSentSize = 0; size_t nMsgCount = 0; for (const auto &data : pnode->vSendMsg) { assert(data.size() > pnode->nSendOffset); int nBytes = 0; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { break; } nBytes = send(pnode->hSocket, reinterpret_cast(data.data()) + pnode->nSendOffset, data.size() - pnode->nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT); } if (nBytes == 0) { // couldn't send anything at all break; } if (nBytes < 0) { // error int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { LogPrintf("socket send error %s\n", NetworkErrorString(nErr)); pnode->CloseSocketDisconnect(); } break; } assert(nBytes > 0); pnode->nLastSend = GetSystemTimeInSeconds(); pnode->nSendBytes += nBytes; pnode->nSendOffset += nBytes; nSentSize += nBytes; if (pnode->nSendOffset != data.size()) { // could not send full message; stop sending more break; } pnode->nSendOffset = 0; pnode->nSendSize -= data.size(); pnode->fPauseSend = pnode->nSendSize > nSendBufferMaxSize; nMsgCount++; } pnode->vSendMsg.erase(pnode->vSendMsg.begin(), pnode->vSendMsg.begin() + nMsgCount); if (pnode->vSendMsg.empty()) { assert(pnode->nSendOffset == 0); assert(pnode->nSendSize == 0); } return nSentSize; } struct NodeEvictionCandidate { NodeId id; int64_t nTimeConnected; int64_t nMinPingUsecTime; int64_t nLastBlockTime; int64_t nLastTXTime; bool fRelevantServices; bool fRelayTxes; bool fBloomFilter; CAddress addr; uint64_t nKeyedNetGroup; bool prefer_evict; }; static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nMinPingUsecTime > b.nMinPingUsecTime; } static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nTimeConnected > b.nTimeConnected; } static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nKeyedNetGroup < b.nKeyedNetGroup; } static bool CompareNodeBlockTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { // There is a fall-through here because it is common for a node to have many // peers which have not yet relayed a block. if (a.nLastBlockTime != b.nLastBlockTime) { return a.nLastBlockTime < b.nLastBlockTime; } if (a.fRelevantServices != b.fRelevantServices) { return b.fRelevantServices; } return a.nTimeConnected > b.nTimeConnected; } static bool CompareNodeTXTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { // There is a fall-through here because it is common for a node to have more // than a few peers that have not yet relayed txn. if (a.nLastTXTime != b.nLastTXTime) { return a.nLastTXTime < b.nLastTXTime; } if (a.fRelayTxes != b.fRelayTxes) { return b.fRelayTxes; } if (a.fBloomFilter != b.fBloomFilter) { return a.fBloomFilter; } return a.nTimeConnected > b.nTimeConnected; } //! Sort an array by the specified comparator, then erase the last K elements. 
template static void EraseLastKElements(std::vector &elements, Comparator comparator, size_t k) { std::sort(elements.begin(), elements.end(), comparator); size_t eraseSize = std::min(k, elements.size()); elements.erase(elements.end() - eraseSize, elements.end()); } /** * Try to find a connection to evict when the node is full. * Extreme care must be taken to avoid opening the node to attacker triggered * network partitioning. * The strategy used here is to protect a small number of peers for each of * several distinct characteristics which are difficult to forge. In order to * partition a node the attacker must be simultaneously better at all of them * than honest peers. */ bool CConnman::AttemptToEvictConnection() { std::vector vEvictionCandidates; { LOCK(cs_vNodes); for (const CNode *node : vNodes) { if (node->HasPermission(PF_NOBAN)) { continue; } if (!node->fInbound) { continue; } if (node->fDisconnect) { continue; } bool peer_relay_txes = false; bool peer_filter_not_null = false; if (node->m_tx_relay != nullptr) { LOCK(node->m_tx_relay->cs_filter); peer_relay_txes = node->m_tx_relay->fRelayTxes; peer_filter_not_null = node->m_tx_relay->pfilter != nullptr; } NodeEvictionCandidate candidate = { node->GetId(), node->nTimeConnected, node->nMinPingUsecTime, node->nLastBlockTime, node->nLastTXTime, HasAllDesirableServiceFlags(node->nServices), peer_relay_txes, peer_filter_not_null, node->addr, node->nKeyedNetGroup, node->m_prefer_evict}; vEvictionCandidates.push_back(candidate); } } // Protect connections with certain characteristics // Deterministically select 4 peers to protect by netgroup. // An attacker cannot predict which netgroups will be protected EraseLastKElements(vEvictionCandidates, CompareNetGroupKeyed, 4); // Protect the 8 nodes with the lowest minimum ping time. // An attacker cannot manipulate this metric without physically moving nodes // closer to the target. EraseLastKElements(vEvictionCandidates, ReverseCompareNodeMinPingTime, 8); // Protect 4 nodes that most recently sent us transactions. // An attacker cannot manipulate this metric without performing useful work. EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4); // Protect 4 nodes that most recently sent us blocks. // An attacker cannot manipulate this metric without performing useful work. EraseLastKElements(vEvictionCandidates, CompareNodeBlockTime, 4); // Protect the half of the remaining nodes which have been connected the // longest. This replicates the non-eviction implicit behavior, and // precludes attacks that start later. EraseLastKElements(vEvictionCandidates, ReverseCompareNodeTimeConnected, vEvictionCandidates.size() / 2); if (vEvictionCandidates.empty()) { return false; } // If any remaining peers are preferred for eviction consider only them. // This happens after the other preferences since if a peer is really the // best by other criteria (esp relaying blocks) // then we probably don't want to evict it no matter what. if (std::any_of( vEvictionCandidates.begin(), vEvictionCandidates.end(), [](NodeEvictionCandidate const &n) { return n.prefer_evict; })) { vEvictionCandidates.erase( std::remove_if( vEvictionCandidates.begin(), vEvictionCandidates.end(), [](NodeEvictionCandidate const &n) { return !n.prefer_evict; }), vEvictionCandidates.end()); } // Identify the network group with the most connections and youngest member. 
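Note: AttemptToEvictConnection above protects small sets of peers along several hard-to-forge axes before it picks a victim, and EraseLastKElements is the helper that sorts by one criterion and drops the protected tail from the candidate pool. A compact sketch of that helper applied to a toy candidate list (illustrative struct and data, not the project's types):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Candidate {
        int id;
        int64_t min_ping_usec;
        int64_t connected_at;
    };

    // Sort by `cmp`, then erase the last k elements (the "protected" ones).
    template <typename T, typename Comparator>
    static void EraseLastKElements(std::vector<T> &elements, Comparator cmp,
                                   size_t k) {
        std::sort(elements.begin(), elements.end(), cmp);
        size_t erase_size = std::min(k, elements.size());
        elements.erase(elements.end() - erase_size, elements.end());
    }

    int main() {
        std::vector<Candidate> v{
            {1, 900, 100}, {2, 50, 200}, {3, 400, 50}, {4, 30, 300}};

        // Protect the 2 candidates with the lowest ping: sort descending by
        // ping so the lowest-ping peers end up at the back and are erased
        // from the eviction pool.
        EraseLastKElements(
            v,
            [](const Candidate &a, const Candidate &b) {
                return a.min_ping_usec > b.min_ping_usec;
            },
            2);

        // Protect the longest-connected half of what remains (descending by
        // connect time puts the oldest connections at the back).
        EraseLastKElements(
            v,
            [](const Candidate &a, const Candidate &b) {
                return a.connected_at > b.connected_at;
            },
            v.size() / 2);

        for (const Candidate &c : v) std::cout << c.id << "\n"; // prints 1
        return 0;
    }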
// (vEvictionCandidates is already sorted by reverse connect time) uint64_t naMostConnections; unsigned int nMostConnections = 0; int64_t nMostConnectionsTime = 0; std::map> mapNetGroupNodes; for (const NodeEvictionCandidate &node : vEvictionCandidates) { std::vector &group = mapNetGroupNodes[node.nKeyedNetGroup]; group.push_back(node); int64_t grouptime = group[0].nTimeConnected; size_t group_size = group.size(); if (group_size > nMostConnections || (group_size == nMostConnections && grouptime > nMostConnectionsTime)) { nMostConnections = group_size; nMostConnectionsTime = grouptime; naMostConnections = node.nKeyedNetGroup; } } // Reduce to the network group with the most connections vEvictionCandidates = std::move(mapNetGroupNodes[naMostConnections]); // Disconnect from the network group with the most connections NodeId evicted = vEvictionCandidates.front().id; LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (pnode->GetId() == evicted) { pnode->fDisconnect = true; return true; } } return false; } void CConnman::AcceptConnection(const ListenSocket &hListenSocket) { struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); SOCKET hSocket = accept(hListenSocket.socket, (struct sockaddr *)&sockaddr, &len); CAddress addr; int nInbound = 0; int nMaxInbound = nMaxConnections - m_max_outbound; if (hSocket != INVALID_SOCKET) { if (!addr.SetSockAddr((const struct sockaddr *)&sockaddr)) { LogPrintf("Warning: Unknown socket family\n"); } } NetPermissionFlags permissionFlags = NetPermissionFlags::PF_NONE; hListenSocket.AddSocketPermissionFlags(permissionFlags); AddWhitelistPermissionFlags(permissionFlags, addr); bool legacyWhitelisted = false; if (NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_ISIMPLICIT)) { NetPermissions::ClearFlag(permissionFlags, PF_ISIMPLICIT); if (gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) { NetPermissions::AddFlag(permissionFlags, PF_FORCERELAY); } if (gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)) { NetPermissions::AddFlag(permissionFlags, PF_RELAY); } NetPermissions::AddFlag(permissionFlags, PF_MEMPOOL); NetPermissions::AddFlag(permissionFlags, PF_NOBAN); legacyWhitelisted = true; } { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (pnode->fInbound) { nInbound++; } } } if (hSocket == INVALID_SOCKET) { int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK) { LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr)); } return; } if (!fNetworkActive) { LogPrintf("connection from %s dropped: not accepting new connections\n", addr.ToString()); CloseSocket(hSocket); return; } if (!IsSelectableSocket(hSocket)) { LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToString()); CloseSocket(hSocket); return; } // According to the internet TCP_NODELAY is not carried into accepted // sockets on all platforms. Set it again here just to be sure. SetSocketNoDelay(hSocket); // Don't accept connections from banned peers. bool banned = m_banman->IsBanned(addr); if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_NOBAN) && banned) { LogPrint(BCLog::NET, "connection from %s dropped (banned)\n", addr.ToString()); CloseSocket(hSocket); return; } // Only accept connections from discouraged peers if our inbound slots // aren't (almost) full. 
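Note: AcceptConnection above composes NetPermissionFlags from the bind options and the whitelist and then tests individual bits (PF_NOBAN, PF_FORCERELAY, ...). A minimal sketch of the add/clear/has bit-flag helpers that style of code relies on (illustrative enum values and free functions, not the project's NetPermissions API):

    #include <cstdint>
    #include <iostream>

    enum PermissionFlags : uint32_t {
        PF_NONE = 0,
        PF_RELAY = (1U << 0),
        PF_NOBAN = (1U << 1),
        PF_BLOOM = (1U << 2),
    };

    static void AddFlag(uint32_t &flags, PermissionFlags f) { flags |= f; }
    static void ClearFlag(uint32_t &flags, PermissionFlags f) {
        flags &= ~uint32_t(f);
    }
    static bool HasFlag(uint32_t flags, PermissionFlags f) {
        return (flags & f) == uint32_t(f);
    }

    int main() {
        uint32_t flags = PF_NONE;
        AddFlag(flags, PF_NOBAN);
        AddFlag(flags, PF_RELAY);
        ClearFlag(flags, PF_RELAY);
        // prints "1 0": NOBAN kept, RELAY cleared
        std::cout << HasFlag(flags, PF_NOBAN) << " " << HasFlag(flags, PF_RELAY)
                  << "\n";
        return 0;
    }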
bool discouraged = m_banman->IsDiscouraged(addr); if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::PF_NOBAN) && nInbound + 1 >= nMaxInbound && discouraged) { LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n", addr.ToString()); CloseSocket(hSocket); return; } if (nInbound >= nMaxInbound) { if (!AttemptToEvictConnection()) { // No connection to evict, disconnect the new connection LogPrint(BCLog::NET, "failed to find an eviction candidate - " "connection dropped (full)\n"); CloseSocket(hSocket); return; } } NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE) .Write(id) .Finalize(); CAddress addr_bind = GetBindAddress(hSocket); ServiceFlags nodeServices = nLocalServices; if (NetPermissions::HasFlag(permissionFlags, PF_BLOOMFILTER)) { nodeServices = static_cast(nodeServices | NODE_BLOOM); } CNode *pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", true); pnode->AddRef(); pnode->m_permissionFlags = permissionFlags; // If this flag is present, the user probably expect that RPC and QT report // it as whitelisted (backward compatibility) pnode->m_legacyWhitelisted = legacyWhitelisted; pnode->m_prefer_evict = discouraged; m_msgproc->InitializeNode(*config, pnode); LogPrint(BCLog::NET, "connection from %s accepted\n", addr.ToString()); { LOCK(cs_vNodes); vNodes.push_back(pnode); } } void CConnman::DisconnectNodes() { { LOCK(cs_vNodes); if (!fNetworkActive) { // Disconnect any connected nodes for (CNode *pnode : vNodes) { if (!pnode->fDisconnect) { LogPrint(BCLog::NET, "Network not active, dropping peer=%d\n", pnode->GetId()); pnode->fDisconnect = true; } } } // Disconnect unused nodes std::vector vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { if (pnode->fDisconnect) { // remove from vNodes vNodes.erase(remove(vNodes.begin(), vNodes.end(), pnode), vNodes.end()); // release outbound grant (if any) pnode->grantOutbound.Release(); // close socket and cleanup pnode->CloseSocketDisconnect(); // hold in disconnected pool until all refs are released pnode->Release(); vNodesDisconnected.push_back(pnode); } } } { // Delete disconnected nodes std::list vNodesDisconnectedCopy = vNodesDisconnected; for (CNode *pnode : vNodesDisconnectedCopy) { // wait until threads are done using it if (pnode->GetRefCount() <= 0) { bool fDelete = false; { TRY_LOCK(pnode->cs_inventory, lockInv); if (lockInv) { TRY_LOCK(pnode->cs_vSend, lockSend); if (lockSend) { fDelete = true; } } } if (fDelete) { vNodesDisconnected.remove(pnode); DeleteNode(pnode); } } } } } void CConnman::NotifyNumConnectionsChanged() { size_t vNodesSize; { LOCK(cs_vNodes); vNodesSize = vNodes.size(); } if (vNodesSize != nPrevNodeCount) { nPrevNodeCount = vNodesSize; if (clientInterface) { clientInterface->NotifyNumConnectionsChanged(vNodesSize); } } } void CConnman::InactivityCheck(CNode *pnode) { int64_t nTime = GetSystemTimeInSeconds(); if (nTime - pnode->nTimeConnected > m_peer_connect_timeout) { if (pnode->nLastRecv == 0 || pnode->nLastSend == 0) { LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d from %d\n", m_peer_connect_timeout, pnode->nLastRecv != 0, pnode->nLastSend != 0, pnode->GetId()); pnode->fDisconnect = true; } else if (nTime - pnode->nLastSend > TIMEOUT_INTERVAL) { LogPrintf("socket sending timeout: %is\n", nTime - pnode->nLastSend); pnode->fDisconnect = true; } else if (nTime - pnode->nLastRecv > (pnode->nVersion > BIP0031_VERSION ? 
TIMEOUT_INTERVAL : 90 * 60)) { LogPrintf("socket receive timeout: %is\n", nTime - pnode->nLastRecv); pnode->fDisconnect = true; } else if (pnode->nPingNonceSent && pnode->nPingUsecStart + TIMEOUT_INTERVAL * 1000000 < GetTimeMicros()) { LogPrintf("ping timeout: %fs\n", 0.000001 * (GetTimeMicros() - pnode->nPingUsecStart)); pnode->fDisconnect = true; } else if (!pnode->fSuccessfullyConnected) { LogPrint(BCLog::NET, "version handshake timeout from %d\n", pnode->GetId()); pnode->fDisconnect = true; } } } bool CConnman::GenerateSelectSet(std::set &recv_set, std::set &send_set, std::set &error_set) { for (const ListenSocket &hListenSocket : vhListenSocket) { recv_set.insert(hListenSocket.socket); } { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { // Implement the following logic: // * If there is data to send, select() for sending data. As this // only happens when optimistic write failed, we choose to first // drain the write buffer in this case before receiving more. This // avoids needlessly queueing received data, if the remote peer is // not themselves receiving data. This means properly utilizing // TCP flow control signalling. // * Otherwise, if there is space left in the receive buffer, // select() for receiving data. // * Hand off all complete messages to the processor, to be handled // without blocking here. bool select_recv = !pnode->fPauseRecv; bool select_send; { LOCK(pnode->cs_vSend); select_send = !pnode->vSendMsg.empty(); } LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } error_set.insert(pnode->hSocket); if (select_send) { send_set.insert(pnode->hSocket); continue; } if (select_recv) { recv_set.insert(pnode->hSocket); } } } return !recv_set.empty() || !send_set.empty() || !error_set.empty(); } #ifdef USE_POLL void CConnman::SocketEvents(std::set &recv_set, std::set &send_set, std::set &error_set) { std::set recv_select_set, send_select_set, error_select_set; if (!GenerateSelectSet(recv_select_set, send_select_set, error_select_set)) { interruptNet.sleep_for( std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS)); return; } std::unordered_map pollfds; for (SOCKET socket_id : recv_select_set) { pollfds[socket_id].fd = socket_id; pollfds[socket_id].events |= POLLIN; } for (SOCKET socket_id : send_select_set) { pollfds[socket_id].fd = socket_id; pollfds[socket_id].events |= POLLOUT; } for (SOCKET socket_id : error_select_set) { pollfds[socket_id].fd = socket_id; // These flags are ignored, but we set them for clarity pollfds[socket_id].events |= POLLERR | POLLHUP; } std::vector vpollfds; vpollfds.reserve(pollfds.size()); for (auto it : pollfds) { vpollfds.push_back(std::move(it.second)); } if (poll(vpollfds.data(), vpollfds.size(), SELECT_TIMEOUT_MILLISECONDS) < 0) { return; } if (interruptNet) { return; } for (struct pollfd pollfd_entry : vpollfds) { if (pollfd_entry.revents & POLLIN) { recv_set.insert(pollfd_entry.fd); } if (pollfd_entry.revents & POLLOUT) { send_set.insert(pollfd_entry.fd); } if (pollfd_entry.revents & (POLLERR | POLLHUP)) { error_set.insert(pollfd_entry.fd); } } } #else void CConnman::SocketEvents(std::set &recv_set, std::set &send_set, std::set &error_set) { std::set recv_select_set, send_select_set, error_select_set; if (!GenerateSelectSet(recv_select_set, send_select_set, error_select_set)) { interruptNet.sleep_for( std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS)); return; } // // Find which sockets have data to receive // struct timeval timeout; timeout.tv_sec = 0; // frequency to poll pnode->vSend timeout.tv_usec = 
SELECT_TIMEOUT_MILLISECONDS * 1000; fd_set fdsetRecv; fd_set fdsetSend; fd_set fdsetError; FD_ZERO(&fdsetRecv); FD_ZERO(&fdsetSend); FD_ZERO(&fdsetError); SOCKET hSocketMax = 0; for (SOCKET hSocket : recv_select_set) { FD_SET(hSocket, &fdsetRecv); hSocketMax = std::max(hSocketMax, hSocket); } for (SOCKET hSocket : send_select_set) { FD_SET(hSocket, &fdsetSend); hSocketMax = std::max(hSocketMax, hSocket); } for (SOCKET hSocket : error_select_set) { FD_SET(hSocket, &fdsetError); hSocketMax = std::max(hSocketMax, hSocket); } int nSelect = select(hSocketMax + 1, &fdsetRecv, &fdsetSend, &fdsetError, &timeout); if (interruptNet) { return; } if (nSelect == SOCKET_ERROR) { int nErr = WSAGetLastError(); LogPrintf("socket select error %s\n", NetworkErrorString(nErr)); for (unsigned int i = 0; i <= hSocketMax; i++) { FD_SET(i, &fdsetRecv); } FD_ZERO(&fdsetSend); FD_ZERO(&fdsetError); if (!interruptNet.sleep_for( std::chrono::milliseconds(SELECT_TIMEOUT_MILLISECONDS))) { return; } } for (SOCKET hSocket : recv_select_set) { if (FD_ISSET(hSocket, &fdsetRecv)) { recv_set.insert(hSocket); } } for (SOCKET hSocket : send_select_set) { if (FD_ISSET(hSocket, &fdsetSend)) { send_set.insert(hSocket); } } for (SOCKET hSocket : error_select_set) { if (FD_ISSET(hSocket, &fdsetError)) { error_set.insert(hSocket); } } } #endif void CConnman::SocketHandler() { std::set recv_set, send_set, error_set; SocketEvents(recv_set, send_set, error_set); if (interruptNet) { return; } // // Accept new connections // for (const ListenSocket &hListenSocket : vhListenSocket) { if (hListenSocket.socket != INVALID_SOCKET && recv_set.count(hListenSocket.socket) > 0) { AcceptConnection(hListenSocket); } } // // Service each socket // std::vector vNodesCopy; { LOCK(cs_vNodes); vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { pnode->AddRef(); } } for (CNode *pnode : vNodesCopy) { if (interruptNet) { return; } // // Receive // bool recvSet = false; bool sendSet = false; bool errorSet = false; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } recvSet = recv_set.count(pnode->hSocket) > 0; sendSet = send_set.count(pnode->hSocket) > 0; errorSet = error_set.count(pnode->hSocket) > 0; } if (recvSet || errorSet) { // typical socket buffer is 8K-64K char pchBuf[0x10000]; int32_t nBytes = 0; { LOCK(pnode->cs_hSocket); if (pnode->hSocket == INVALID_SOCKET) { continue; } nBytes = recv(pnode->hSocket, pchBuf, sizeof(pchBuf), MSG_DONTWAIT); } if (nBytes > 0) { bool notify = false; if (!pnode->ReceiveMsgBytes(*config, pchBuf, nBytes, notify)) { pnode->CloseSocketDisconnect(); } RecordBytesRecv(nBytes); if (notify) { size_t nSizeAdded = 0; auto it(pnode->vRecvMsg.begin()); for (; it != pnode->vRecvMsg.end(); ++it) { if (!it->complete()) { break; } nSizeAdded += it->vRecv.size() + CMessageHeader::HEADER_SIZE; } { LOCK(pnode->cs_vProcessMsg); pnode->vProcessMsg.splice(pnode->vProcessMsg.end(), pnode->vRecvMsg, pnode->vRecvMsg.begin(), it); pnode->nProcessQueueSize += nSizeAdded; pnode->fPauseRecv = pnode->nProcessQueueSize > nReceiveFloodSize; } WakeMessageHandler(); } } else if (nBytes == 0) { // socket closed gracefully if (!pnode->fDisconnect) { LogPrint(BCLog::NET, "socket closed\n"); } pnode->CloseSocketDisconnect(); } else if (nBytes < 0) { // error int nErr = WSAGetLastError(); if (nErr != WSAEWOULDBLOCK && nErr != WSAEMSGSIZE && nErr != WSAEINTR && nErr != WSAEINPROGRESS) { if (!pnode->fDisconnect) { LogPrintf("socket recv error %s\n", NetworkErrorString(nErr)); } pnode->CloseSocketDisconnect(); } } } // // Send 
// if (sendSet) { LOCK(pnode->cs_vSend); size_t nBytes = SocketSendData(pnode); if (nBytes) { RecordBytesSent(nBytes); } } InactivityCheck(pnode); } { LOCK(cs_vNodes); for (CNode *pnode : vNodesCopy) { pnode->Release(); } } } void CConnman::ThreadSocketHandler() { while (!interruptNet) { DisconnectNodes(); NotifyNumConnectionsChanged(); SocketHandler(); } } void CConnman::WakeMessageHandler() { { - std::lock_guard lock(mutexMsgProc); + LOCK(mutexMsgProc); fMsgProcWake = true; } condMsgProc.notify_one(); } #ifdef USE_UPNP static CThreadInterrupt g_upnp_interrupt; static std::thread g_upnp_thread; static void ThreadMapPort() { std::string port = strprintf("%u", GetListenPort()); const char *multicastif = nullptr; const char *minissdpdpath = nullptr; struct UPNPDev *devlist = nullptr; char lanaddr[64]; #ifndef UPNPDISCOVER_SUCCESS /* miniupnpc 1.5 */ devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0); #elif MINIUPNPC_API_VERSION < 14 /* miniupnpc 1.6 */ int error = 0; devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error); #else /* miniupnpc 1.9.20150730 */ int error = 0; devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error); #endif struct UPNPUrls urls; struct IGDdatas data; int r; r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr)); if (r == 1) { if (fDiscover) { char externalIPAddress[40]; r = UPNP_GetExternalIPAddress( urls.controlURL, data.first.servicetype, externalIPAddress); if (r != UPNPCOMMAND_SUCCESS) { LogPrintf("UPnP: GetExternalIPAddress() returned %d\n", r); } else { if (externalIPAddress[0]) { CNetAddr resolved; if (LookupHost(externalIPAddress, resolved, false)) { LogPrintf("UPnP: ExternalIPAddress = %s\n", resolved.ToString()); AddLocal(resolved, LOCAL_UPNP); } } else { LogPrintf("UPnP: GetExternalIPAddress failed.\n"); } } } std::string strDesc = "Bitcoin " + FormatFullVersion(); do { #ifndef UPNPDISCOVER_SUCCESS /* miniupnpc 1.5 */ r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype, port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0); #else /* miniupnpc 1.6 */ r = UPNP_AddPortMapping(urls.controlURL, data.first.servicetype, port.c_str(), port.c_str(), lanaddr, strDesc.c_str(), "TCP", 0, "0"); #endif if (r != UPNPCOMMAND_SUCCESS) { LogPrintf( "AddPortMapping(%s, %s, %s) failed with code %d (%s)\n", port, port, lanaddr, r, strupnperror(r)); } else { LogPrintf("UPnP Port Mapping successful.\n"); } } while (g_upnp_interrupt.sleep_for(std::chrono::minutes(20))); r = UPNP_DeletePortMapping(urls.controlURL, data.first.servicetype, port.c_str(), "TCP", 0); LogPrintf("UPNP_DeletePortMapping() returned: %d\n", r); freeUPNPDevlist(devlist); devlist = nullptr; FreeUPNPUrls(&urls); } else { LogPrintf("No valid UPnP IGDs found\n"); freeUPNPDevlist(devlist); devlist = nullptr; if (r != 0) { FreeUPNPUrls(&urls); } } } void StartMapPort() { if (!g_upnp_thread.joinable()) { assert(!g_upnp_interrupt); g_upnp_thread = std::thread( (std::bind(&TraceThread, "upnp", &ThreadMapPort))); } } void InterruptMapPort() { if (g_upnp_thread.joinable()) { g_upnp_interrupt(); } } void StopMapPort() { if (g_upnp_thread.joinable()) { g_upnp_thread.join(); g_upnp_interrupt.reset(); } } #else void StartMapPort() { // Intentionally left blank. } void InterruptMapPort() { // Intentionally left blank. } void StopMapPort() { // Intentionally left blank. } #endif void CConnman::ThreadDNSAddressSeed() { // goal: only query DNS seeds if address need is acute. 
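Note: WakeMessageHandler above sets fMsgProcWake while holding the mutex and only then notifies the condition variable, so a sleeping message-handler thread cannot miss the wakeup. A self-contained sketch of that set-flag-then-notify pattern (names are illustrative):

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    std::mutex g_mutex;
    std::condition_variable g_cond;
    bool g_wake = false; // guarded by g_mutex

    // The "WakeMessageHandler" side: set the flag under the lock, notify after.
    void Wake() {
        {
            std::lock_guard<std::mutex> lock(g_mutex);
            g_wake = true;
        }
        g_cond.notify_one();
    }

    // The handler side: sleep until woken, then consume the flag.
    void HandlerThread() {
        std::unique_lock<std::mutex> lock(g_mutex);
        // The predicate guards against spurious wakeups and lost notifies.
        g_cond.wait(lock, [] { return g_wake; });
        g_wake = false;
        std::cout << "woke up\n";
    }

    int main() {
        std::thread t(HandlerThread);
        Wake();
        t.join();
        return 0;
    }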
// Avoiding DNS seeds when we don't need them improves user privacy by // creating fewer identifying DNS requests, reduces trust by giving seeds // less influence on the network topology, and reduces traffic to the seeds. if ((addrman.size() > 0) && (!gArgs.GetBoolArg("-forcednsseed", DEFAULT_FORCEDNSSEED))) { if (!interruptNet.sleep_for(std::chrono::seconds(11))) { return; } LOCK(cs_vNodes); int nRelevant = 0; for (const CNode *pnode : vNodes) { nRelevant += pnode->fSuccessfullyConnected && !pnode->fFeeler && !pnode->fOneShot && !pnode->m_manual_connection && !pnode->fInbound; } if (nRelevant >= 2) { LogPrintf("P2P peers available. Skipped DNS seeding.\n"); return; } } const std::vector &vSeeds = config->GetChainParams().DNSSeeds(); int found = 0; LogPrintf("Loading addresses from DNS seeds (could take a while)\n"); for (const std::string &seed : vSeeds) { if (interruptNet) { return; } if (HaveNameProxy()) { AddOneShot(seed); } else { std::vector vIPs; std::vector vAdd; ServiceFlags requiredServiceBits = GetDesirableServiceFlags(NODE_NONE); std::string host = strprintf("x%x.%s", requiredServiceBits, seed); CNetAddr resolveSource; if (!resolveSource.SetInternal(host)) { continue; } // Limits number of IPs learned from a DNS seed unsigned int nMaxIPs = 256; if (LookupHost(host.c_str(), vIPs, nMaxIPs, true)) { for (const CNetAddr &ip : vIPs) { int nOneDay = 24 * 3600; CAddress addr = CAddress( CService(ip, config->GetChainParams().GetDefaultPort()), requiredServiceBits); // Use a random age between 3 and 7 days old. addr.nTime = GetTime() - 3 * nOneDay - GetRand(4 * nOneDay); vAdd.push_back(addr); found++; } addrman.Add(vAdd, resolveSource); } else { // We now avoid directly using results from DNS Seeds which do // not support service bit filtering, instead using them as a // oneshot to get nodes with our desired service bits. AddOneShot(seed); } } } LogPrintf("%d addresses found from DNS seeds\n", found); } void CConnman::DumpAddresses() { int64_t nStart = GetTimeMillis(); CAddrDB adb(config->GetChainParams()); adb.Write(addrman); LogPrint(BCLog::NET, "Flushed %d addresses to peers.dat %dms\n", addrman.size(), GetTimeMillis() - nStart); } void CConnman::ProcessOneShot() { std::string strDest; { LOCK(cs_vOneShots); if (vOneShots.empty()) { return; } strDest = vOneShots.front(); vOneShots.pop_front(); } CAddress addr; CSemaphoreGrant grant(*semOutbound, true); if (grant) { OpenNetworkConnection(addr, false, &grant, strDest.c_str(), true); } } bool CConnman::GetTryNewOutboundPeer() { return m_try_another_outbound_peer; } void CConnman::SetTryNewOutboundPeer(bool flag) { m_try_another_outbound_peer = flag; LogPrint(BCLog::NET, "net: setting try another outbound peer=%s\n", flag ? "true" : "false"); } // Return the number of peers we have over our outbound connection limit. // Exclude peers that are marked for disconnect, or are going to be disconnected // soon (eg one-shots and feelers). // Also exclude peers that haven't finished initial connection handshake yet (so // that we don't decide we're over our desired connection limit, and then evict // some peer that has finished the handshake). 
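// Worked example (using the net.h defaults, MAX_OUTBOUND_FULL_RELAY_CONNECTIONS = 8
// and MAX_BLOCKS_ONLY_CONNECTIONS = 2): with 11 fully connected, non-manual,
// non-feeler outbound peers this returns max(11 - 8 - 2, 0) = 1.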
int CConnman::GetExtraOutboundCount() { int nOutbound = 0; { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (!pnode->fInbound && !pnode->m_manual_connection && !pnode->fFeeler && !pnode->fDisconnect && !pnode->fOneShot && pnode->fSuccessfullyConnected) { ++nOutbound; } } } return std::max( nOutbound - m_max_outbound_full_relay - m_max_outbound_block_relay, 0); } void CConnman::ThreadOpenConnections(const std::vector connect) { // Connect to specific addresses if (!connect.empty()) { for (int64_t nLoop = 0;; nLoop++) { ProcessOneShot(); for (const std::string &strAddr : connect) { CAddress addr(CService(), NODE_NONE); OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), false, false, true); for (int i = 0; i < 10 && i < nLoop; i++) { if (!interruptNet.sleep_for( std::chrono::milliseconds(500))) { return; } } } if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } } } // Initiate network connections int64_t nStart = GetTime(); // Minimum time before next feeler connection (in microseconds). int64_t nNextFeeler = PoissonNextSend(nStart * 1000 * 1000, FEELER_INTERVAL); while (!interruptNet) { ProcessOneShot(); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } CSemaphoreGrant grant(*semOutbound); if (interruptNet) { return; } // Add seed nodes if DNS seeds are all down (an infrastructure attack?). if (addrman.size() == 0 && (GetTime() - nStart > 60)) { static bool done = false; if (!done) { LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be " "available.\n"); CNetAddr local; local.SetInternal("fixedseeds"); addrman.Add(convertSeed6(config->GetChainParams().FixedSeeds()), local); done = true; } } // // Choose an address to connect to based on most recently seen // CAddress addrConnect; // Only connect out to one peer per network group (/16 for IPv4). int nOutboundFullRelay = 0; int nOutboundBlockRelay = 0; std::set> setConnected; { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (!pnode->fInbound && !pnode->m_manual_connection) { // Netgroups for inbound and addnode peers are not excluded // because our goal here is to not use multiple of our // limited outbound slots on a single netgroup but inbound // and addnode peers do not use our outbound slots. Inbound // peers also have the added issue that they're attacker // controlled and could be used to prevent us from // connecting to particular hosts if we used them here. setConnected.insert(pnode->addr.GetGroup()); if (pnode->m_tx_relay == nullptr) { nOutboundBlockRelay++; } else if (!pnode->fFeeler) { nOutboundFullRelay++; } } } } // Feeler Connections // // Design goals: // * Increase the number of connectable addresses in the tried table. // // Method: // * Choose a random address from new and attempt to connect to it if // we can connect successfully it is added to tried. // * Start attempting feeler connections only after node finishes // making outbound connections. // * Only make a feeler connection once every few minutes. // bool fFeeler = false; if (nOutboundFullRelay >= m_max_outbound_full_relay && nOutboundBlockRelay >= m_max_outbound_block_relay && !GetTryNewOutboundPeer()) { // The current time right now (in microseconds). 
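// Feeler scheduling sketch: nNextFeeler is advanced by PoissonNextSend(), so
// the gaps between feelers are exponentially distributed with a mean of
// FEELER_INTERVAL (120 s) -- roughly one feeler every two minutes on average,
// but at unpredictable instants.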
int64_t nTime = GetTimeMicros(); if (nTime > nNextFeeler) { nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL); fFeeler = true; } else { continue; } } addrman.ResolveCollisions(); int64_t nANow = GetAdjustedTime(); int nTries = 0; while (!interruptNet) { CAddrInfo addr = addrman.SelectTriedCollision(); // SelectTriedCollision returns an invalid address if it is empty. if (!fFeeler || !addr.IsValid()) { addr = addrman.Select(fFeeler); } // Require outbound connections, other than feelers, to be to // distinct network groups if (!fFeeler && setConnected.count(addr.GetGroup())) { break; } // if we selected an invalid or local address, restart if (!addr.IsValid() || IsLocal(addr)) { break; } // If we didn't find an appropriate destination after trying 100 // addresses fetched from addrman, stop this loop, and let the outer // loop run again (which sleeps, adds seed nodes, recalculates // already-connected network ranges, ...) before trying new addrman // addresses. nTries++; if (nTries > 100) { break; } if (!IsReachable(addr)) { continue; } // only consider very recently tried nodes after 30 failed attempts if (nANow - addr.nLastTry < 600 && nTries < 30) { continue; } // for non-feelers, require all the services we'll want, // for feelers, only require they be a full node (only because most // SPV clients don't have a good address DB available) if (!fFeeler && !HasAllDesirableServiceFlags(addr.nServices)) { continue; } if (fFeeler && !MayHaveUsefulAddressDB(addr.nServices)) { continue; } // do not allow non-default ports, unless after 50 invalid addresses // selected already. if (addr.GetPort() != config->GetChainParams().GetDefaultPort() && nTries < 50) { continue; } addrConnect = addr; break; } if (addrConnect.IsValid()) { if (fFeeler) { // Add small amount of random noise before connection to avoid // synchronization. int randsleep = GetRandInt(FEELER_SLEEP_WINDOW * 1000); if (!interruptNet.sleep_for( std::chrono::milliseconds(randsleep))) { return; } LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString()); } // Open this connection as block-relay-only if we're already at our // full-relay capacity, but not yet at our block-relay peer limit. // (It should not be possible for fFeeler to be set if we're not // also at our block-relay peer limit, but check against that as // well for sanity.) 
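// In short: a non-feeler connection opened while the full-relay slots are
// already exhausted, but block-relay slots remain, is made block-relay-only
// (its m_tx_relay stays nullptr, so no tx or addr relay with that peer).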
bool block_relay_only = nOutboundBlockRelay < m_max_outbound_block_relay && !fFeeler && nOutboundFullRelay >= m_max_outbound_full_relay; OpenNetworkConnection( addrConnect, int(setConnected.size()) >= std::min(nMaxConnections - 1, 2), &grant, nullptr, false, fFeeler, false, block_relay_only); } } } std::vector CConnman::GetAddedNodeInfo() { std::vector ret; std::list lAddresses(0); { LOCK(cs_vAddedNodes); ret.reserve(vAddedNodes.size()); std::copy(vAddedNodes.cbegin(), vAddedNodes.cend(), std::back_inserter(lAddresses)); } // Build a map of all already connected addresses (by IP:port and by name) // to inbound/outbound and resolved CService std::map mapConnected; std::map> mapConnectedByName; { LOCK(cs_vNodes); for (const CNode *pnode : vNodes) { if (pnode->addr.IsValid()) { mapConnected[pnode->addr] = pnode->fInbound; } std::string addrName = pnode->GetAddrName(); if (!addrName.empty()) { mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->fInbound, static_cast(pnode->addr)); } } } for (const std::string &strAddNode : lAddresses) { CService service( LookupNumeric(strAddNode.c_str(), Params().GetDefaultPort())); AddedNodeInfo addedNode{strAddNode, CService(), false, false}; if (service.IsValid()) { // strAddNode is an IP:port auto it = mapConnected.find(service); if (it != mapConnected.end()) { addedNode.resolvedAddress = service; addedNode.fConnected = true; addedNode.fInbound = it->second; } } else { // strAddNode is a name auto it = mapConnectedByName.find(strAddNode); if (it != mapConnectedByName.end()) { addedNode.resolvedAddress = it->second.second; addedNode.fConnected = true; addedNode.fInbound = it->second.first; } } ret.emplace_back(std::move(addedNode)); } return ret; } void CConnman::ThreadOpenAddedConnections() { while (true) { CSemaphoreGrant grant(*semAddnode); std::vector vInfo = GetAddedNodeInfo(); bool tried = false; for (const AddedNodeInfo &info : vInfo) { if (!info.fConnected) { if (!grant.TryAcquire()) { // If we've used up our semaphore and need a new one, let's // not wait here since while we are waiting the // addednodeinfo state might change. break; } tried = true; CAddress addr(CService(), NODE_NONE); OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), false, false, true); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) { return; } } } // Retry every 60 seconds if a connection was attempted, otherwise two // seconds. if (!interruptNet.sleep_for(std::chrono::seconds(tried ? 60 : 2))) { return; } } } // If successful, this moves the passed grant to the constructed node. 
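// The call is a no-op when the network is inactive or shutting down, when the
// target is local or already connected, or (for address-based connects) when
// it is banned/discouraged and a BanMan is configured. Minimal usage sketch
// (hypothetical destination, same pattern as ProcessOneShot above):
//   CAddress addr(CService(), NODE_NONE);
//   OpenNetworkConnection(addr, /*fCountFailure=*/false, /*grantOutbound=*/nullptr,
//                         "seed.example.invalid", /*fOneShot=*/true);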
void CConnman::OpenNetworkConnection(const CAddress &addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, bool fOneShot, bool fFeeler, bool manual_connection, bool block_relay_only) { // // Initiate outbound network connection // if (interruptNet) { return; } if (!fNetworkActive) { return; } if (!pszDest) { bool banned_or_discouraged = m_banman && (m_banman->IsDiscouraged(addrConnect) || m_banman->IsBanned(addrConnect)); if (IsLocal(addrConnect) || FindNode(static_cast(addrConnect)) || banned_or_discouraged || FindNode(addrConnect.ToStringIPPort())) { return; } } else if (FindNode(std::string(pszDest))) { return; } CNode *pnode = ConnectNode(addrConnect, pszDest, fCountFailure, manual_connection, block_relay_only); if (!pnode) { return; } if (grantOutbound) { grantOutbound->MoveTo(pnode->grantOutbound); } if (fOneShot) { pnode->fOneShot = true; } if (fFeeler) { pnode->fFeeler = true; } if (manual_connection) { pnode->m_manual_connection = true; } m_msgproc->InitializeNode(*config, pnode); { LOCK(cs_vNodes); vNodes.push_back(pnode); } } void CConnman::ThreadMessageHandler() { while (!flagInterruptMsgProc) { std::vector vNodesCopy; { LOCK(cs_vNodes); vNodesCopy = vNodes; for (CNode *pnode : vNodesCopy) { pnode->AddRef(); } } bool fMoreWork = false; for (CNode *pnode : vNodesCopy) { if (pnode->fDisconnect) { continue; } // Receive messages bool fMoreNodeWork = m_msgproc->ProcessMessages( *config, pnode, flagInterruptMsgProc); fMoreWork |= (fMoreNodeWork && !pnode->fPauseSend); if (flagInterruptMsgProc) { return; } // Send messages { LOCK(pnode->cs_sendProcessing); m_msgproc->SendMessages(*config, pnode, flagInterruptMsgProc); } if (flagInterruptMsgProc) { return; } } { LOCK(cs_vNodes); for (CNode *pnode : vNodesCopy) { pnode->Release(); } } WAIT_LOCK(mutexMsgProc, lock); if (!fMoreWork) { condMsgProc.wait_until(lock, std::chrono::steady_clock::now() + std::chrono::milliseconds(100), - [this] { return fMsgProcWake; }); + [this]() EXCLUSIVE_LOCKS_REQUIRED( + mutexMsgProc) { return fMsgProcWake; }); } fMsgProcWake = false; } } bool CConnman::BindListenPort(const CService &addrBind, std::string &strError, NetPermissionFlags permissions) { strError = ""; int nOne = 1; // Create socket for listening for incoming connections struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); if (!addrBind.GetSockAddr((struct sockaddr *)&sockaddr, &len)) { strError = strprintf("Error: Bind address family for %s not supported", addrBind.ToString()); LogPrintf("%s\n", strError); return false; } SOCKET hListenSocket = CreateSocket(addrBind); if (hListenSocket == INVALID_SOCKET) { strError = strprintf("Error: Couldn't open socket for incoming " "connections (socket returned error %s)", NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError); return false; } // Allow binding if the port is still in TIME_WAIT state after // the program was closed and restarted. setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne, sizeof(int)); // Some systems don't have IPV6_V6ONLY but are always v6only; others do have // the option and enable it by default or not. Try to enable it, if // possible. 
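// With IPV6_V6ONLY set, an IPv6 listening socket will not also accept
// v4-mapped IPv4 connections; IPv4 is handled by its own bind instead (see
// CConnman::InitBinds, which binds in6addr_any and INADDR_ANY separately).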
if (addrBind.IsIPv6()) { #ifdef IPV6_V6ONLY setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (sockopt_arg_type)&nOne, sizeof(int)); #endif #ifdef WIN32 int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED; setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (sockopt_arg_type)&nProtLevel, sizeof(int)); #endif } if (::bind(hListenSocket, (struct sockaddr *)&sockaddr, len) == SOCKET_ERROR) { int nErr = WSAGetLastError(); if (nErr == WSAEADDRINUSE) { strError = strprintf(_("Unable to bind to %s on this computer. %s " "is probably already running.") .translated, addrBind.ToString(), PACKAGE_NAME); } else { strError = strprintf(_("Unable to bind to %s on this computer " "(bind returned error %s)") .translated, addrBind.ToString(), NetworkErrorString(nErr)); } LogPrintf("%s\n", strError); CloseSocket(hListenSocket); return false; } LogPrintf("Bound to %s\n", addrBind.ToString()); // Listen for incoming connections if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) { strError = strprintf(_("Error: Listening for incoming connections " "failed (listen returned error %s)") .translated, NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError); CloseSocket(hListenSocket); return false; } vhListenSocket.push_back(ListenSocket(hListenSocket, permissions)); if (addrBind.IsRoutable() && fDiscover && (permissions & PF_NOBAN) == 0) { AddLocal(addrBind, LOCAL_BIND); } return true; } void Discover() { if (!fDiscover) { return; } #ifdef WIN32 // Get local host IP char pszHostName[256] = ""; if (gethostname(pszHostName, sizeof(pszHostName)) != SOCKET_ERROR) { std::vector vaddr; if (LookupHost(pszHostName, vaddr, 0, true)) { for (const CNetAddr &addr : vaddr) { if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: %s - %s\n", __func__, pszHostName, addr.ToString()); } } } } #elif (HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS) // Get local host ip struct ifaddrs *myaddrs; if (getifaddrs(&myaddrs) == 0) { for (struct ifaddrs *ifa = myaddrs; ifa != nullptr; ifa = ifa->ifa_next) { if (ifa->ifa_addr == nullptr || (ifa->ifa_flags & IFF_UP) == 0 || strcmp(ifa->ifa_name, "lo") == 0 || strcmp(ifa->ifa_name, "lo0") == 0) { continue; } if (ifa->ifa_addr->sa_family == AF_INET) { struct sockaddr_in *s4 = reinterpret_cast(ifa->ifa_addr); CNetAddr addr(s4->sin_addr); if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: IPv4 %s: %s\n", __func__, ifa->ifa_name, addr.ToString()); } } else if (ifa->ifa_addr->sa_family == AF_INET6) { struct sockaddr_in6 *s6 = reinterpret_cast(ifa->ifa_addr); CNetAddr addr(s6->sin6_addr); if (AddLocal(addr, LOCAL_IF)) { LogPrintf("%s: IPv6 %s: %s\n", __func__, ifa->ifa_name, addr.ToString()); } } } freeifaddrs(myaddrs); } #endif } void CConnman::SetNetworkActive(bool active) { LogPrint(BCLog::NET, "SetNetworkActive: %s\n", active); if (fNetworkActive == active) { return; } fNetworkActive = active; uiInterface.NotifyNetworkActiveChanged(fNetworkActive); } CConnman::CConnman(const Config &configIn, uint64_t nSeed0In, uint64_t nSeed1In) : config(&configIn), nSeed0(nSeed0In), nSeed1(nSeed1In) { SetTryNewOutboundPeer(false); Options connOptions; Init(connOptions); } NodeId CConnman::GetNewNodeId() { return nLastNodeId.fetch_add(1); } bool CConnman::Bind(const CService &addr, unsigned int flags, NetPermissionFlags permissions) { if (!(flags & BF_EXPLICIT) && !IsReachable(addr)) { return false; } std::string strError; if (!BindListenPort(addr, strError, permissions)) { if ((flags & BF_REPORT_ERROR) && clientInterface) { clientInterface->ThreadSafeMessageBox( strError, "", 
CClientUIInterface::MSG_ERROR); } return false; } return true; } bool CConnman::InitBinds( const std::vector &binds, const std::vector &whiteBinds) { bool fBound = false; for (const auto &addrBind : binds) { fBound |= Bind(addrBind, (BF_EXPLICIT | BF_REPORT_ERROR), NetPermissionFlags::PF_NONE); } for (const auto &addrBind : whiteBinds) { fBound |= Bind(addrBind.m_service, (BF_EXPLICIT | BF_REPORT_ERROR), addrBind.m_flags); } if (binds.empty() && whiteBinds.empty()) { struct in_addr inaddr_any; inaddr_any.s_addr = INADDR_ANY; struct in6_addr inaddr6_any = IN6ADDR_ANY_INIT; fBound |= Bind(CService(inaddr6_any, GetListenPort()), BF_NONE, NetPermissionFlags::PF_NONE); fBound |= Bind(CService(inaddr_any, GetListenPort()), !fBound ? BF_REPORT_ERROR : BF_NONE, NetPermissionFlags::PF_NONE); } return fBound; } bool CConnman::Start(CScheduler &scheduler, const Options &connOptions) { Init(connOptions); { LOCK(cs_totalBytesRecv); nTotalBytesRecv = 0; } { LOCK(cs_totalBytesSent); nTotalBytesSent = 0; nMaxOutboundTotalBytesSentInCycle = 0; nMaxOutboundCycleStartTime = 0; } if (fListen && !InitBinds(connOptions.vBinds, connOptions.vWhiteBinds)) { if (clientInterface) { clientInterface->ThreadSafeMessageBox( _("Failed to listen on any port. Use -listen=0 if you want " "this.") .translated, "", CClientUIInterface::MSG_ERROR); } return false; } for (const auto &strDest : connOptions.vSeedNodes) { AddOneShot(strDest); } if (clientInterface) { clientInterface->InitMessage(_("Loading P2P addresses...").translated); } // Load addresses from peers.dat int64_t nStart = GetTimeMillis(); { CAddrDB adb(config->GetChainParams()); if (adb.Read(addrman)) { LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman.size(), GetTimeMillis() - nStart); } else { // Addrman can be in an inconsistent state after failure, reset it addrman.Clear(); LogPrintf("Invalid or missing peers.dat; recreating\n"); DumpAddresses(); } } uiInterface.InitMessage(_("Starting network threads...").translated); fAddressesInitialized = true; if (semOutbound == nullptr) { // initialize semaphore semOutbound = std::make_unique( std::min(m_max_outbound, nMaxConnections)); } if (semAddnode == nullptr) { // initialize semaphore semAddnode = std::make_unique(nMaxAddnode); } // // Start threads // assert(m_msgproc); InterruptSocks5(false); interruptNet.reset(); flagInterruptMsgProc = false; { LOCK(mutexMsgProc); fMsgProcWake = false; } // Send and receive from sockets, accept connections threadSocketHandler = std::thread( &TraceThread>, "net", std::function(std::bind(&CConnman::ThreadSocketHandler, this))); if (!gArgs.GetBoolArg("-dnsseed", true)) { LogPrintf("DNS seeding disabled\n"); } else { threadDNSAddressSeed = std::thread(&TraceThread>, "dnsseed", std::function( std::bind(&CConnman::ThreadDNSAddressSeed, this))); } // Initiate outbound connections from -addnode threadOpenAddedConnections = std::thread(&TraceThread>, "addcon", std::function(std::bind( &CConnman::ThreadOpenAddedConnections, this))); if (connOptions.m_use_addrman_outgoing && !connOptions.m_specified_outgoing.empty()) { if (clientInterface) { clientInterface->ThreadSafeMessageBox( _("Cannot provide specific connections and have addrman find " "outgoing connections at the same.") .translated, "", CClientUIInterface::MSG_ERROR); } return false; } if (connOptions.m_use_addrman_outgoing || !connOptions.m_specified_outgoing.empty()) { threadOpenConnections = std::thread(&TraceThread>, "opencon", std::function( std::bind(&CConnman::ThreadOpenConnections, this, 
connOptions.m_specified_outgoing))); } // Process messages threadMessageHandler = std::thread(&TraceThread>, "msghand", std::function( std::bind(&CConnman::ThreadMessageHandler, this))); // Dump network addresses scheduler.scheduleEvery( [this]() { this->DumpAddresses(); return true; }, DUMP_PEERS_INTERVAL); return true; } class CNetCleanup { public: CNetCleanup() {} ~CNetCleanup() { #ifdef WIN32 // Shutdown Windows Sockets WSACleanup(); #endif } }; static CNetCleanup instance_of_cnetcleanup; void CConnman::Interrupt() { { - std::lock_guard lock(mutexMsgProc); + LOCK(mutexMsgProc); flagInterruptMsgProc = true; } condMsgProc.notify_all(); interruptNet(); InterruptSocks5(true); if (semOutbound) { for (int i = 0; i < m_max_outbound; i++) { semOutbound->post(); } } if (semAddnode) { for (int i = 0; i < nMaxAddnode; i++) { semAddnode->post(); } } } void CConnman::Stop() { if (threadMessageHandler.joinable()) { threadMessageHandler.join(); } if (threadOpenConnections.joinable()) { threadOpenConnections.join(); } if (threadOpenAddedConnections.joinable()) { threadOpenAddedConnections.join(); } if (threadDNSAddressSeed.joinable()) { threadDNSAddressSeed.join(); } if (threadSocketHandler.joinable()) { threadSocketHandler.join(); } if (fAddressesInitialized) { DumpAddresses(); fAddressesInitialized = false; } // Close sockets for (CNode *pnode : vNodes) { pnode->CloseSocketDisconnect(); } for (ListenSocket &hListenSocket : vhListenSocket) { if (hListenSocket.socket != INVALID_SOCKET) { if (!CloseSocket(hListenSocket.socket)) { LogPrintf("CloseSocket(hListenSocket) failed with error %s\n", NetworkErrorString(WSAGetLastError())); } } } // clean up some globals (to help leak detection) for (CNode *pnode : vNodes) { DeleteNode(pnode); } for (CNode *pnode : vNodesDisconnected) { DeleteNode(pnode); } vNodes.clear(); vNodesDisconnected.clear(); vhListenSocket.clear(); semOutbound.reset(); semAddnode.reset(); } void CConnman::DeleteNode(CNode *pnode) { assert(pnode); bool fUpdateConnectionTime = false; m_msgproc->FinalizeNode(*config, pnode->GetId(), fUpdateConnectionTime); if (fUpdateConnectionTime) { addrman.Connected(pnode->addr); } delete pnode; } CConnman::~CConnman() { Interrupt(); Stop(); } size_t CConnman::GetAddressCount() const { return addrman.size(); } void CConnman::SetServices(const CService &addr, ServiceFlags nServices) { addrman.SetServices(addr, nServices); } void CConnman::MarkAddressGood(const CAddress &addr) { addrman.Good(addr); } void CConnman::AddNewAddresses(const std::vector &vAddr, const CAddress &addrFrom, int64_t nTimePenalty) { addrman.Add(vAddr, addrFrom, nTimePenalty); } std::vector CConnman::GetAddresses() { return addrman.GetAddr(); } bool CConnman::AddNode(const std::string &strNode) { LOCK(cs_vAddedNodes); for (const std::string &it : vAddedNodes) { if (strNode == it) { return false; } } vAddedNodes.push_back(strNode); return true; } bool CConnman::RemoveAddedNode(const std::string &strNode) { LOCK(cs_vAddedNodes); for (std::vector::iterator it = vAddedNodes.begin(); it != vAddedNodes.end(); ++it) { if (strNode == *it) { vAddedNodes.erase(it); return true; } } return false; } size_t CConnman::GetNodeCount(NumConnections flags) { LOCK(cs_vNodes); // Shortcut if we want total if (flags == CConnman::CONNECTIONS_ALL) { return vNodes.size(); } int nNum = 0; for (const auto &pnode : vNodes) { if (flags & (pnode->fInbound ? 
CONNECTIONS_IN : CONNECTIONS_OUT)) { nNum++; } } return nNum; } void CConnman::GetNodeStats(std::vector &vstats) { vstats.clear(); LOCK(cs_vNodes); vstats.reserve(vNodes.size()); for (CNode *pnode : vNodes) { vstats.emplace_back(); pnode->copyStats(vstats.back()); } } bool CConnman::DisconnectNode(const std::string &strNode) { LOCK(cs_vNodes); if (CNode *pnode = FindNode(strNode)) { pnode->fDisconnect = true; return true; } return false; } bool CConnman::DisconnectNode(const CSubNet &subnet) { bool disconnected = false; LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (subnet.Match(pnode->addr)) { pnode->fDisconnect = true; disconnected = true; } } return disconnected; } bool CConnman::DisconnectNode(const CNetAddr &addr) { return DisconnectNode(CSubNet(addr)); } bool CConnman::DisconnectNode(NodeId id) { LOCK(cs_vNodes); for (CNode *pnode : vNodes) { if (id == pnode->GetId()) { pnode->fDisconnect = true; return true; } } return false; } void CConnman::RecordBytesRecv(uint64_t bytes) { LOCK(cs_totalBytesRecv); nTotalBytesRecv += bytes; } void CConnman::RecordBytesSent(uint64_t bytes) { LOCK(cs_totalBytesSent); nTotalBytesSent += bytes; uint64_t now = GetTime(); if (nMaxOutboundCycleStartTime + nMaxOutboundTimeframe < now) { // timeframe expired, reset cycle nMaxOutboundCycleStartTime = now; nMaxOutboundTotalBytesSentInCycle = 0; } // TODO, exclude whitebind peers nMaxOutboundTotalBytesSentInCycle += bytes; } void CConnman::SetMaxOutboundTarget(uint64_t limit) { LOCK(cs_totalBytesSent); nMaxOutboundLimit = limit; } uint64_t CConnman::GetMaxOutboundTarget() { LOCK(cs_totalBytesSent); return nMaxOutboundLimit; } uint64_t CConnman::GetMaxOutboundTimeframe() { LOCK(cs_totalBytesSent); return nMaxOutboundTimeframe; } uint64_t CConnman::GetMaxOutboundTimeLeftInCycle() { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return 0; } if (nMaxOutboundCycleStartTime == 0) { return nMaxOutboundTimeframe; } uint64_t cycleEndTime = nMaxOutboundCycleStartTime + nMaxOutboundTimeframe; uint64_t now = GetTime(); return (cycleEndTime < now) ? 0 : cycleEndTime - GetTime(); } void CConnman::SetMaxOutboundTimeframe(uint64_t timeframe) { LOCK(cs_totalBytesSent); if (nMaxOutboundTimeframe != timeframe) { // reset measure-cycle in case of changing the timeframe. nMaxOutboundCycleStartTime = GetTime(); } nMaxOutboundTimeframe = timeframe; } bool CConnman::OutboundTargetReached(bool historicalBlockServingLimit) { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return false; } if (historicalBlockServingLimit) { // keep a large enough buffer to at least relay each block once. uint64_t timeLeftInCycle = GetMaxOutboundTimeLeftInCycle(); uint64_t buffer = timeLeftInCycle / 600 * ONE_MEGABYTE; if (buffer >= nMaxOutboundLimit || nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit - buffer) { return true; } } else if (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) { return true; } return false; } uint64_t CConnman::GetOutboundTargetBytesLeft() { LOCK(cs_totalBytesSent); if (nMaxOutboundLimit == 0) { return 0; } return (nMaxOutboundTotalBytesSentInCycle >= nMaxOutboundLimit) ? 
0 : nMaxOutboundLimit - nMaxOutboundTotalBytesSentInCycle; } uint64_t CConnman::GetTotalBytesRecv() { LOCK(cs_totalBytesRecv); return nTotalBytesRecv; } uint64_t CConnman::GetTotalBytesSent() { LOCK(cs_totalBytesSent); return nTotalBytesSent; } ServiceFlags CConnman::GetLocalServices() const { return nLocalServices; } void CConnman::SetBestHeight(int height) { nBestHeight.store(height, std::memory_order_release); } int CConnman::GetBestHeight() const { return nBestHeight.load(std::memory_order_acquire); } unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; } CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, bool fInboundIn, bool block_relay_only) : nTimeConnected(GetSystemTimeInSeconds()), addr(addrIn), addrBind(addrBindIn), fInbound(fInboundIn), nKeyedNetGroup(nKeyedNetGroupIn), addrKnown(5000, 0.001), // Don't relay addr messages to peers that we connect to as // block-relay-only peers (to prevent adversaries from inferring these // links from addr traffic). m_addr_relay_peer(!block_relay_only), id(idIn), nLocalHostNonce(nLocalHostNonceIn), nLocalServices(nLocalServicesIn), nMyStartingHeight(nMyStartingHeightIn) { hSocket = hSocketIn; addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn; hashContinue = BlockHash(); if (!block_relay_only) { m_tx_relay = std::make_unique(); } for (const std::string &msg : getAllNetMessageTypes()) { mapRecvBytesPerMsgCmd[msg] = 0; } mapRecvBytesPerMsgCmd[NET_MESSAGE_COMMAND_OTHER] = 0; if (fLogIPs) { LogPrint(BCLog::NET, "Added connection to %s peer=%d\n", addrName, id); } else { LogPrint(BCLog::NET, "Added connection peer=%d\n", id); } } CNode::~CNode() { CloseSocket(hSocket); } bool CConnman::NodeFullyConnected(const CNode *pnode) { return pnode && pnode->fSuccessfullyConnected && !pnode->fDisconnect; } void CConnman::PushMessage(CNode *pnode, CSerializedNetMsg &&msg) { size_t nMessageSize = msg.data.size(); size_t nTotalSize = nMessageSize + CMessageHeader::HEADER_SIZE; LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.command), nMessageSize, pnode->GetId()); std::vector serializedHeader; serializedHeader.reserve(CMessageHeader::HEADER_SIZE); uint256 hash = Hash(msg.data.data(), msg.data.data() + nMessageSize); CMessageHeader hdr(config->GetChainParams().NetMagic(), msg.command.c_str(), nMessageSize); memcpy(hdr.pchChecksum, hash.begin(), CMessageHeader::CHECKSUM_SIZE); CVectorWriter{SER_NETWORK, INIT_PROTO_VERSION, serializedHeader, 0, hdr}; size_t nBytesSent = 0; { LOCK(pnode->cs_vSend); bool optimisticSend(pnode->vSendMsg.empty()); // log total amount of bytes per command pnode->mapSendBytesPerMsgCmd[msg.command] += nTotalSize; pnode->nSendSize += nTotalSize; if (pnode->nSendSize > nSendBufferMaxSize) { pnode->fPauseSend = true; } pnode->vSendMsg.push_back(std::move(serializedHeader)); if (nMessageSize) { pnode->vSendMsg.push_back(std::move(msg.data)); } // If write queue empty, attempt "optimistic write" if (optimisticSend == true) { nBytesSent = SocketSendData(pnode); } } if (nBytesSent) { RecordBytesSent(nBytesSent); } } bool CConnman::ForNode(NodeId id, std::function func) { CNode *found = nullptr; LOCK(cs_vNodes); for (auto &&pnode : vNodes) { if (pnode->GetId() == id) { found = pnode; break; } } return found != nullptr && NodeFullyConnected(found) && func(found); } int64_t 
CConnman::PoissonNextSendInbound(int64_t now, int average_interval_seconds) { if (m_next_send_inv_to_incoming < now) { // If this function were called from multiple threads simultaneously // it would be possible that both update the next send variable, and // return a different result to their caller. This is not possible in // practice as only the net processing thread invokes this function. m_next_send_inv_to_incoming = PoissonNextSend(now, average_interval_seconds); } return m_next_send_inv_to_incoming; } int64_t PoissonNextSend(int64_t now, int average_interval_seconds) { return now + int64_t(log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */) * average_interval_seconds * -1000000.0 + 0.5); } CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const { return CSipHasher(nSeed0, nSeed1).Write(id); } uint64_t CConnman::CalculateKeyedNetGroup(const CAddress &ad) const { std::vector vchNetGroup(ad.GetGroup()); return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP) .Write(vchNetGroup.data(), vchNetGroup.size()) .Finalize(); } /** * This function convert MaxBlockSize from byte to * MB with a decimal precision one digit rounded down * E.g. * 1660000 -> 1.6 * 2010000 -> 2.0 * 1000000 -> 1.0 * 230000 -> 0.2 * 50000 -> 0.0 * * NB behavior for EB<1MB not standardized yet still * the function applies the same algo used for * EB greater or equal to 1MB */ std::string getSubVersionEB(uint64_t MaxBlockSize) { // Prepare EB string we are going to add to SubVer: // 1) translate from byte to MB and convert to string // 2) limit the EB string to the first decimal digit (floored) std::stringstream ebMBs; ebMBs << (MaxBlockSize / (ONE_MEGABYTE / 10)); std::string eb = ebMBs.str(); eb.insert(eb.size() - 1, ".", 1); if (eb.substr(0, 1) == ".") { eb = "0" + eb; } return eb; } std::string userAgent(const Config &config) { // format excessive blocksize value std::string eb = getSubVersionEB(config.GetMaxBlockSize()); std::vector uacomments; uacomments.push_back("EB" + eb); // Comments are checked for char compliance at startup, it is safe to add // them to the user agent string for (const std::string &cmt : gArgs.GetArgs("-uacomment")) { uacomments.push_back(cmt); } // Size compliance is checked at startup, it is safe to not check it again std::string subversion = FormatSubVersion(CLIENT_NAME, CLIENT_VERSION, uacomments); return subversion; } diff --git a/src/net.h b/src/net.h index adf785de9..023a63f20 100644 --- a/src/net.h +++ b/src/net.h @@ -1,953 +1,953 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Copyright (c) 2017-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_NET_H #define BITCOIN_NET_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef WIN32 #include #endif class BanMan; class Config; class CNode; class CScheduler; /** Default for -whitelistrelay. */ static const bool DEFAULT_WHITELISTRELAY = true; /** Default for -whitelistforcerelay. */ static const bool DEFAULT_WHITELISTFORCERELAY = false; /** * Time between pings automatically sent out for latency probing and keepalive * (in seconds). 
*/ static const int PING_INTERVAL = 2 * 60; /** * Time after which to disconnect, after waiting for a ping response (or * inactivity). */ static const int TIMEOUT_INTERVAL = 20 * 60; /** Run the feeler connection loop once every 2 minutes or 120 seconds. **/ static const int FEELER_INTERVAL = 120; /** The maximum number of entries in an 'inv' protocol message */ static const unsigned int MAX_INV_SZ = 50000; static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv), "Max protocol message length must be greater than largest " "possible INV message"); /** The maximum number of entries in a locator */ static const unsigned int MAX_LOCATOR_SZ = 101; /** The maximum number of new addresses to accumulate before announcing. */ static const unsigned int MAX_ADDR_TO_SEND = 1000; /** Maximum length of the user agent string in `version` message */ static const unsigned int MAX_SUBVERSION_LENGTH = 256; /** * Maximum number of automatic outgoing nodes over which we'll relay everything * (blocks, tx, addrs, etc) */ static const int MAX_OUTBOUND_FULL_RELAY_CONNECTIONS = 8; /** Maximum number of addnode outgoing nodes */ static const int MAX_ADDNODE_CONNECTIONS = 8; /** Maximum number of block-relay-only outgoing connections */ static const int MAX_BLOCKS_ONLY_CONNECTIONS = 2; /** -listen default */ static const bool DEFAULT_LISTEN = true; /** -upnp default */ #ifdef USE_UPNP static const bool DEFAULT_UPNP = USE_UPNP; #else static const bool DEFAULT_UPNP = false; #endif /** The maximum number of peer connections to maintain. */ static const unsigned int DEFAULT_MAX_PEER_CONNECTIONS = 125; /** The default for -maxuploadtarget. 0 = Unlimited */ static const uint64_t DEFAULT_MAX_UPLOAD_TARGET = 0; /** The default timeframe for -maxuploadtarget. 1 day. */ static const uint64_t MAX_UPLOAD_TIMEFRAME = 60 * 60 * 24; /** Default for blocks only*/ static const bool DEFAULT_BLOCKSONLY = false; /** -peertimeout default */ static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60; static const bool DEFAULT_FORCEDNSSEED = false; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; typedef int64_t NodeId; /** * Special NodeId that represent no node. */ static constexpr NodeId NO_NODE = -1; struct AddedNodeInfo { std::string strAddedNode; CService resolvedAddress; bool fConnected; bool fInbound; }; struct CNodeStats; class CClientUIInterface; struct CSerializedNetMsg { CSerializedNetMsg() = default; CSerializedNetMsg(CSerializedNetMsg &&) = default; CSerializedNetMsg &operator=(CSerializedNetMsg &&) = default; // No copying, only moves. 
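// Payloads can be large, so deleting the copy operations forces callers to
// std::move a CSerializedNetMsg into CConnman::PushMessage rather than copy it.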
CSerializedNetMsg(const CSerializedNetMsg &msg) = delete; CSerializedNetMsg &operator=(const CSerializedNetMsg &) = delete; std::vector data; std::string command; }; namespace { struct CConnmanTest; } class NetEventsInterface; class CConnman { public: enum NumConnections { CONNECTIONS_NONE = 0, CONNECTIONS_IN = (1U << 0), CONNECTIONS_OUT = (1U << 1), CONNECTIONS_ALL = (CONNECTIONS_IN | CONNECTIONS_OUT), }; struct Options { ServiceFlags nLocalServices = NODE_NONE; int nMaxConnections = 0; int m_max_outbound_full_relay = 0; int m_max_outbound_block_relay = 0; int nMaxAddnode = 0; int nMaxFeeler = 0; int nBestHeight = 0; CClientUIInterface *uiInterface = nullptr; NetEventsInterface *m_msgproc = nullptr; BanMan *m_banman = nullptr; unsigned int nSendBufferMaxSize = 0; unsigned int nReceiveFloodSize = 0; uint64_t nMaxOutboundTimeframe = 0; uint64_t nMaxOutboundLimit = 0; int64_t m_peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT; std::vector vSeedNodes; std::vector vWhitelistedRange; std::vector vWhiteBinds; std::vector vBinds; bool m_use_addrman_outgoing = true; std::vector m_specified_outgoing; std::vector m_added_nodes; }; void Init(const Options &connOptions) { nLocalServices = connOptions.nLocalServices; nMaxConnections = connOptions.nMaxConnections; m_max_outbound_full_relay = std::min( connOptions.m_max_outbound_full_relay, connOptions.nMaxConnections); m_max_outbound_block_relay = connOptions.m_max_outbound_block_relay; m_use_addrman_outgoing = connOptions.m_use_addrman_outgoing; nMaxAddnode = connOptions.nMaxAddnode; nMaxFeeler = connOptions.nMaxFeeler; m_max_outbound = m_max_outbound_full_relay + m_max_outbound_block_relay + nMaxFeeler; nBestHeight = connOptions.nBestHeight; clientInterface = connOptions.uiInterface; m_banman = connOptions.m_banman; m_msgproc = connOptions.m_msgproc; nSendBufferMaxSize = connOptions.nSendBufferMaxSize; nReceiveFloodSize = connOptions.nReceiveFloodSize; m_peer_connect_timeout = connOptions.m_peer_connect_timeout; { LOCK(cs_totalBytesSent); nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe; nMaxOutboundLimit = connOptions.nMaxOutboundLimit; } vWhitelistedRange = connOptions.vWhitelistedRange; { LOCK(cs_vAddedNodes); vAddedNodes = connOptions.m_added_nodes; } } CConnman(const Config &configIn, uint64_t seed0, uint64_t seed1); ~CConnman(); bool Start(CScheduler &scheduler, const Options &options); // TODO: Remove NO_THREAD_SAFETY_ANALYSIS. Lock cs_vNodes before reading the // variable vNodes. // // When removing NO_THREAD_SAFETY_ANALYSIS be aware of the following lock // order requirements: // * CheckForStaleTipAndEvictPeers locks cs_main before indirectly calling // GetExtraOutboundCount which locks cs_vNodes. // * ProcessMessage locks cs_main and g_cs_orphans before indirectly calling // ForEachNode which locks cs_vNodes. // // Thus the implicit locking order requirement is: (1) cs_main, (2) // g_cs_orphans, (3) cs_vNodes. 
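// Illustration of why the order matters: if one thread held cs_vNodes and then
// tried to take cs_main while another thread, already holding cs_main, called
// into GetExtraOutboundCount (which takes cs_vNodes), the two would deadlock.
// Hence: take cs_main / g_cs_orphans before cs_vNodes, never the reverse.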
void Stop() NO_THREAD_SAFETY_ANALYSIS; void Interrupt(); bool GetNetworkActive() const { return fNetworkActive; }; bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; }; void SetNetworkActive(bool active); void OpenNetworkConnection(const CAddress &addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, bool fOneShot = false, bool fFeeler = false, bool manual_connection = false, bool block_relay_only = false); bool CheckIncomingNonce(uint64_t nonce); bool ForNode(NodeId id, std::function func); void PushMessage(CNode *pnode, CSerializedNetMsg &&msg); template void ForEachNode(Callable &&func) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNode(Callable &&func) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNodeThen(Callable &&pre, CallableAfter &&post) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; template void ForEachNodeThen(Callable &&pre, CallableAfter &&post) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; // Addrman functions size_t GetAddressCount() const; void SetServices(const CService &addr, ServiceFlags nServices); void MarkAddressGood(const CAddress &addr); void AddNewAddresses(const std::vector &vAddr, const CAddress &addrFrom, int64_t nTimePenalty = 0); std::vector GetAddresses(); // This allows temporarily exceeding m_max_outbound_full_relay, with the // goal of finding a peer that is better than all our current peers. void SetTryNewOutboundPeer(bool flag); bool GetTryNewOutboundPeer(); // Return the number of outbound peers we have in excess of our target (eg, // if we previously called SetTryNewOutboundPeer(true), and have since set // to false, we may have extra peers that we wish to disconnect). This may // return a value less than (num_outbound_connections - num_outbound_slots) // in cases where some outbound connections are not yet fully connected, or // not yet fully disconnected. int GetExtraOutboundCount(); bool AddNode(const std::string &node); bool RemoveAddedNode(const std::string &node); std::vector GetAddedNodeInfo(); size_t GetNodeCount(NumConnections num); void GetNodeStats(std::vector &vstats); bool DisconnectNode(const std::string &node); bool DisconnectNode(const CSubNet &subnet); bool DisconnectNode(const CNetAddr &addr); bool DisconnectNode(NodeId id); ServiceFlags GetLocalServices() const; //! set the max outbound target in bytes. void SetMaxOutboundTarget(uint64_t limit); uint64_t GetMaxOutboundTarget(); //! set the timeframe for the max outbound target. void SetMaxOutboundTimeframe(uint64_t timeframe); uint64_t GetMaxOutboundTimeframe(); //! check if the outbound target is reached. If param //! historicalBlockServingLimit is set true, the function will response true //! if the limit for serving historical blocks has been reached. bool OutboundTargetReached(bool historicalBlockServingLimit); //! response the bytes left in the current max outbound cycle in case of no //! limit, it will always response 0 uint64_t GetOutboundTargetBytesLeft(); //! response the time in second left in the current max outbound cycle in //! 
case of no limit, it will always response 0 uint64_t GetMaxOutboundTimeLeftInCycle(); uint64_t GetTotalBytesRecv(); uint64_t GetTotalBytesSent(); void SetBestHeight(int height); int GetBestHeight() const; /** Get a unique deterministic randomizer. */ CSipHasher GetDeterministicRandomizer(uint64_t id) const; unsigned int GetReceiveFloodSize() const; void WakeMessageHandler(); /** * Attempts to obfuscate tx time through exponentially distributed emitting. * Works assuming that a single interval is used. * Variable intervals will result in privacy decrease. */ int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds); private: struct ListenSocket { public: SOCKET socket; inline void AddSocketPermissionFlags(NetPermissionFlags &flags) const { NetPermissions::AddFlag(flags, m_permissions); } ListenSocket(SOCKET socket_, NetPermissionFlags permissions_) : socket(socket_), m_permissions(permissions_) {} private: NetPermissionFlags m_permissions; }; bool BindListenPort(const CService &bindAddr, std::string &strError, NetPermissionFlags permissions); bool Bind(const CService &addr, unsigned int flags, NetPermissionFlags permissions); bool InitBinds(const std::vector &binds, const std::vector &whiteBinds); void ThreadOpenAddedConnections(); void AddOneShot(const std::string &strDest); void ProcessOneShot(); void ThreadOpenConnections(std::vector connect); void ThreadMessageHandler(); void AcceptConnection(const ListenSocket &hListenSocket); void DisconnectNodes(); void NotifyNumConnectionsChanged(); void InactivityCheck(CNode *pnode); bool GenerateSelectSet(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketEvents(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketHandler(); void ThreadSocketHandler(); void ThreadDNSAddressSeed(); uint64_t CalculateKeyedNetGroup(const CAddress &ad) const; CNode *FindNode(const CNetAddr &ip); CNode *FindNode(const CSubNet &subNet); CNode *FindNode(const std::string &addrName); CNode *FindNode(const CService &addr); bool AttemptToEvictConnection(); CNode *ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only); void AddWhitelistPermissionFlags(NetPermissionFlags &flags, const CNetAddr &addr) const; void DeleteNode(CNode *pnode); NodeId GetNewNodeId(); size_t SocketSendData(CNode *pnode) const; void DumpAddresses(); // Network stats void RecordBytesRecv(uint64_t bytes); void RecordBytesSent(uint64_t bytes); // Whether the node should be passed out in ForEach* callbacks static bool NodeFullyConnected(const CNode *pnode); const Config *config; // Network usage totals RecursiveMutex cs_totalBytesRecv; RecursiveMutex cs_totalBytesSent; uint64_t nTotalBytesRecv GUARDED_BY(cs_totalBytesRecv); uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytesSent); // outbound limit & stats uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundCycleStartTime GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundLimit GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundTimeframe GUARDED_BY(cs_totalBytesSent); // P2P timeout in seconds int64_t m_peer_connect_timeout; // Whitelisted ranges. Any node connecting from these is automatically // whitelisted (as well as those connecting to whitelisted binds). 
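// Copied from connOptions.vWhitelistedRange in Init(); checked against the
// address of each accepted connection (see AddWhitelistPermissionFlags in
// net.cpp).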
std::vector vWhitelistedRange; unsigned int nSendBufferMaxSize{0}; unsigned int nReceiveFloodSize{0}; std::vector vhListenSocket; std::atomic fNetworkActive{true}; bool fAddressesInitialized{false}; CAddrMan addrman; std::deque vOneShots GUARDED_BY(cs_vOneShots); RecursiveMutex cs_vOneShots; std::vector vAddedNodes GUARDED_BY(cs_vAddedNodes); RecursiveMutex cs_vAddedNodes; std::vector vNodes GUARDED_BY(cs_vNodes); std::list vNodesDisconnected; mutable RecursiveMutex cs_vNodes; std::atomic nLastNodeId{0}; unsigned int nPrevNodeCount{0}; /** Services this instance offers */ ServiceFlags nLocalServices; std::unique_ptr semOutbound; std::unique_ptr semAddnode; int nMaxConnections; // How many full-relay (tx, block, addr) outbound peers we want int m_max_outbound_full_relay; // How many block-relay only outbound peers we want // We do not relay tx or addr messages with these peers int m_max_outbound_block_relay; int nMaxAddnode; int nMaxFeeler; int m_max_outbound; bool m_use_addrman_outgoing; std::atomic nBestHeight; CClientUIInterface *clientInterface; NetEventsInterface *m_msgproc; BanMan *m_banman; /** SipHasher seeds for deterministic randomness */ const uint64_t nSeed0, nSeed1; /** flag for waking the message processor. */ - bool fMsgProcWake; + bool fMsgProcWake GUARDED_BY(mutexMsgProc); std::condition_variable condMsgProc; Mutex mutexMsgProc; std::atomic flagInterruptMsgProc{false}; CThreadInterrupt interruptNet; std::thread threadDNSAddressSeed; std::thread threadSocketHandler; std::thread threadOpenAddedConnections; std::thread threadOpenConnections; std::thread threadMessageHandler; /** * flag for deciding to connect to an extra outbound peer, in excess of * m_max_outbound_full_relay. This takes the place of a feeler connection. */ std::atomic_bool m_try_another_outbound_peer; std::atomic m_next_send_inv_to_incoming{0}; friend struct ::CConnmanTest; }; void Discover(); void StartMapPort(); void InterruptMapPort(); void StopMapPort(); unsigned short GetListenPort(); /** * Interface for message handling */ class NetEventsInterface { public: virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual bool SendMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual void InitializeNode(const Config &config, CNode *pnode) = 0; virtual void FinalizeNode(const Config &config, NodeId id, bool &update_connection_time) = 0; protected: /** * Protected destructor so that instances can only be deleted by derived * classes. If that restriction is no longer desired, this should be made * public and virtual. 
*/ ~NetEventsInterface() = default; }; enum { // unknown LOCAL_NONE, // address a local interface listens on LOCAL_IF, // address explicit bound to LOCAL_BIND, // address reported by UPnP LOCAL_UPNP, // address explicitly specified (-externalip=) LOCAL_MANUAL, LOCAL_MAX }; bool IsPeerAddrLocalGood(CNode *pnode); void AdvertiseLocal(CNode *pnode); /** * Mark a network as reachable or unreachable (no automatic connects to it) * @note Networks are reachable by default */ void SetReachable(enum Network net, bool reachable); /** @returns true if the network is reachable, false otherwise */ bool IsReachable(enum Network net); /** @returns true if the address is in a reachable network, false otherwise */ bool IsReachable(const CNetAddr &addr); bool AddLocal(const CService &addr, int nScore = LOCAL_NONE); bool AddLocal(const CNetAddr &addr, int nScore = LOCAL_NONE); void RemoveLocal(const CService &addr); bool SeenLocal(const CService &addr); bool IsLocal(const CService &addr); bool GetLocal(CService &addr, const CNetAddr *paddrPeer = nullptr); CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices); extern bool fDiscover; extern bool fListen; extern bool g_relay_txes; struct LocalServiceInfo { int nScore; int nPort; }; extern RecursiveMutex cs_mapLocalHost; extern std::map mapLocalHost GUARDED_BY(cs_mapLocalHost); extern const std::string NET_MESSAGE_COMMAND_OTHER; // Command, total bytes typedef std::map mapMsgCmdSize; /** * POD that contains various stats about a node. * Usually constructed from CConman::GetNodeStats. Stats are filled from the * node using CNode::copyStats. */ struct CNodeStats { NodeId nodeid; ServiceFlags nServices; bool fRelayTxes; int64_t nLastSend; int64_t nLastRecv; int64_t nTimeConnected; int64_t nTimeOffset; std::string addrName; int nVersion; std::string cleanSubVer; bool fInbound; bool m_manual_connection; int nStartingHeight; uint64_t nSendBytes; mapMsgCmdSize mapSendBytesPerMsgCmd; uint64_t nRecvBytes; mapMsgCmdSize mapRecvBytesPerMsgCmd; NetPermissionFlags m_permissionFlags; bool m_legacyWhitelisted; int64_t m_ping_usec; int64_t m_ping_wait_usec; int64_t m_min_ping_usec; Amount minFeeFilter; // Our address, as reported by the peer std::string addrLocal; // Address of this peer CAddress addr; // Bind address of our side of the connection CAddress addrBind; }; class CNetMessage { private: mutable CHash256 hasher; mutable uint256 data_hash; public: // Parsing header (false) or data (true) bool in_data; // Partially received header. CDataStream hdrbuf; // Complete header. CMessageHeader hdr; uint32_t nHdrPos; // Received message data. CDataStream vRecv; uint32_t nDataPos; // Time (in microseconds) of message receipt. int64_t nTime; CNetMessage(const CMessageHeader::MessageMagic &pchMessageStartIn, int nTypeIn, int nVersionIn) : hdrbuf(nTypeIn, nVersionIn), hdr(pchMessageStartIn), vRecv(nTypeIn, nVersionIn) { hdrbuf.resize(24); in_data = false; nHdrPos = 0; nDataPos = 0; nTime = 0; } bool complete() const { if (!in_data) { return false; } return (hdr.nMessageSize == nDataPos); } const uint256 &GetMessageHash() const; void SetVersion(int nVersionIn) { hdrbuf.SetVersion(nVersionIn); vRecv.SetVersion(nVersionIn); } int readHeader(const Config &config, const char *pch, uint32_t nBytes); int readData(const char *pch, uint32_t nBytes); }; /** Information about a peer */ class CNode { friend class CConnman; public: // socket std::atomic nServices{NODE_NONE}; SOCKET hSocket GUARDED_BY(cs_hSocket); // Total size of all vSendMsg entries. 
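// Compared against nSendBufferMaxSize in CConnman::PushMessage; once exceeded,
// fPauseSend is set as back-pressure so no further data is queued for this
// peer until the socket drains.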
size_t nSendSize{0}; // Offset inside the first vSendMsg already sent. size_t nSendOffset{0}; uint64_t nSendBytes GUARDED_BY(cs_vSend){0}; std::deque> vSendMsg GUARDED_BY(cs_vSend); RecursiveMutex cs_vSend; RecursiveMutex cs_hSocket; RecursiveMutex cs_vRecv; RecursiveMutex cs_vProcessMsg; std::list vProcessMsg GUARDED_BY(cs_vProcessMsg); size_t nProcessQueueSize{0}; RecursiveMutex cs_sendProcessing; std::deque vRecvGetData; uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0}; std::atomic nRecvVersion{INIT_PROTO_VERSION}; std::atomic nLastSend{0}; std::atomic nLastRecv{0}; const int64_t nTimeConnected; std::atomic nTimeOffset{0}; // Address of this peer const CAddress addr; // Bind address of our side of the connection const CAddress addrBind; std::atomic nVersion{0}; RecursiveMutex cs_SubVer; /** * cleanSubVer is a sanitized string of the user agent byte array we read * from the wire. This cleaned string can safely be logged or displayed. */ std::string cleanSubVer GUARDED_BY(cs_SubVer){}; // This peer is preferred for eviction. bool m_prefer_evict{false}; bool HasPermission(NetPermissionFlags permission) const { return NetPermissions::HasFlag(m_permissionFlags, permission); } // This boolean is unusued in actual processing, only present for backward // compatibility at RPC/QT level bool m_legacyWhitelisted{false}; // If true this node is being used as a short lived feeler. bool fFeeler{false}; bool fOneShot{false}; bool m_manual_connection{false}; // set by version message bool fClient{false}; // after BIP159, set by version message bool m_limited_node{false}; const bool fInbound; std::atomic_bool fSuccessfullyConnected{false}; // Setting fDisconnect to true will cause the node to be disconnected the // next time DisconnectNodes() runs std::atomic_bool fDisconnect{false}; bool fSentAddr{false}; CSemaphoreGrant grantOutbound; std::atomic nRefCount{0}; const uint64_t nKeyedNetGroup; std::atomic_bool fPauseRecv{false}; std::atomic_bool fPauseSend{false}; protected: mapMsgCmdSize mapSendBytesPerMsgCmd; mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); public: BlockHash hashContinue; std::atomic nStartingHeight{-1}; // flood relay std::vector vAddrToSend; CRollingBloomFilter addrKnown; bool fGetAddr{false}; int64_t nNextAddrSend GUARDED_BY(cs_sendProcessing){0}; int64_t nNextLocalAddrSend GUARDED_BY(cs_sendProcessing){0}; const bool m_addr_relay_peer; bool IsAddrRelayPeer() const { return m_addr_relay_peer; } // List of block ids we still have to announce. // There is no final sorting before sending, as they are always sent // immediately and in the order requested. std::vector vInventoryBlockToSend GUARDED_BY(cs_inventory); RecursiveMutex cs_inventory; struct TxRelay { TxRelay() { pfilter = std::make_unique(); } mutable RecursiveMutex cs_filter; // We use fRelayTxes for two purposes - // a) it allows us to not relay tx invs before receiving the peer's // version message. // b) the peer may tell us in its version message that we should not // relay tx invs unless it loads a bloom filter. bool fRelayTxes GUARDED_BY(cs_filter){false}; std::unique_ptr pfilter PT_GUARDED_BY(cs_filter) GUARDED_BY(cs_filter); mutable RecursiveMutex cs_tx_inventory; CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_tx_inventory){ 50000, 0.000001}; // Set of transaction ids we still have to announce. // They are sorted by the mempool before relay, so the order is not // important. 
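// Filled by CNode::PushInventory (under cs_tx_inventory) for MSG_TX invs that
// are not already in filterInventoryKnown.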
std::set setInventoryTxToSend; // Used for BIP35 mempool sending bool fSendMempool GUARDED_BY(cs_tx_inventory){false}; // Last time a "MEMPOOL" request was serviced. std::atomic m_last_mempool_req{ std::chrono::seconds{0}}; int64_t nNextInvSend{0}; RecursiveMutex cs_feeFilter; // Minimum fee rate with which to filter inv's to this node Amount minFeeFilter GUARDED_BY(cs_feeFilter){Amount::zero()}; Amount lastSentFeeFilter{Amount::zero()}; int64_t nextSendTimeFeeFilter{0}; }; // m_tx_relay == nullptr if we're not relaying transactions with this peer std::unique_ptr m_tx_relay; // Used for headers announcements - unfiltered blocks to relay std::vector vBlockHashesToAnnounce GUARDED_BY(cs_inventory); // Block and TXN accept times std::atomic nLastBlockTime{0}; std::atomic nLastTXTime{0}; // Ping time measurement: // The pong reply we're expecting, or 0 if no pong expected. std::atomic nPingNonceSent{0}; // Time (in usec) the last ping was sent, or 0 if no ping was ever sent. std::atomic nPingUsecStart{0}; // Last measured round-trip time. std::atomic nPingUsecTime{0}; // Best measured round-trip time. std::atomic nMinPingUsecTime{std::numeric_limits::max()}; // Whether a ping is requested. std::atomic fPingQueued{false}; std::set orphan_work_set; CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn = "", bool fInboundIn = false, bool block_relay_only = false); ~CNode(); CNode(const CNode &) = delete; CNode &operator=(const CNode &) = delete; private: const NodeId id; const uint64_t nLocalHostNonce; // Services offered to this peer const ServiceFlags nLocalServices; const int nMyStartingHeight; int nSendVersion{0}; NetPermissionFlags m_permissionFlags{PF_NONE}; // Used only by SocketHandler thread std::list vRecvMsg; mutable RecursiveMutex cs_addrName; std::string addrName GUARDED_BY(cs_addrName); // Our address, as reported by the peer CService addrLocal GUARDED_BY(cs_addrLocal); mutable RecursiveMutex cs_addrLocal; public: NodeId GetId() const { return id; } uint64_t GetLocalNonce() const { return nLocalHostNonce; } int GetMyStartingHeight() const { return nMyStartingHeight; } int GetRefCount() const { assert(nRefCount >= 0); return nRefCount; } bool ReceiveMsgBytes(const Config &config, const char *pch, uint32_t nBytes, bool &complete); void SetRecvVersion(int nVersionIn) { nRecvVersion = nVersionIn; } int GetRecvVersion() const { return nRecvVersion; } void SetSendVersion(int nVersionIn); int GetSendVersion() const; CService GetAddrLocal() const; //! May not be called more than once void SetAddrLocal(const CService &addrLocalIn); CNode *AddRef() { nRefCount++; return this; } void Release() { nRefCount--; } void AddAddressKnown(const CAddress &_addr) { addrKnown.insert(_addr.GetKey()); } void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand) { // Known checking here is only to save space from duplicates. // SendMessages will filter it again for knowns that were added // after addresses were pushed. 
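// When the queue is already at MAX_ADDR_TO_SEND (1000 entries, see above), a
// random existing entry is overwritten instead of growing the vector, which
// keeps per-peer addr memory bounded.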
if (_addr.IsValid() && !addrKnown.contains(_addr.GetKey())) { if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) { vAddrToSend[insecure_rand.randrange(vAddrToSend.size())] = _addr; } else { vAddrToSend.push_back(_addr); } } } void AddInventoryKnown(const CInv &inv) { if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_tx_inventory); m_tx_relay->filterInventoryKnown.insert(inv.hash); } } void PushInventory(const CInv &inv) { if (inv.type == MSG_TX && m_tx_relay != nullptr) { const TxId txid(inv.hash); LOCK(m_tx_relay->cs_tx_inventory); if (!m_tx_relay->filterInventoryKnown.contains(txid)) { m_tx_relay->setInventoryTxToSend.insert(txid); } } else if (inv.type == MSG_BLOCK) { const BlockHash hash(inv.hash); LOCK(cs_inventory); vInventoryBlockToSend.push_back(hash); } } void PushBlockHash(const BlockHash &hash) { LOCK(cs_inventory); vBlockHashesToAnnounce.push_back(hash); } void CloseSocketDisconnect(); void copyStats(CNodeStats &stats); ServiceFlags GetLocalServices() const { return nLocalServices; } std::string GetAddrName() const; //! Sets the addrName only if it was not previously set void MaybeSetAddrName(const std::string &addrNameIn); }; /** * Return a timestamp in the future (in microseconds) for exponentially * distributed events. */ int64_t PoissonNextSend(int64_t now, int average_interval_seconds); std::string getSubVersionEB(uint64_t MaxBlockSize); std::string userAgent(const Config &config); #endif // BITCOIN_NET_H diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 987b84e70..3f7102a4b 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1,2689 +1,2691 @@ // Copyright (c) 2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include