diff --git a/src/avalanche/processor.h b/src/avalanche/processor.h
index 0e4828a4c..2c531779e 100644
--- a/src/avalanche/processor.h
+++ b/src/avalanche/processor.h
@@ -1,301 +1,296 @@
// Copyright (c) 2018-2019 The Bitcoin developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_AVALANCHE_PROCESSOR_H
#define BITCOIN_AVALANCHE_PROCESSOR_H

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

class Config;
class CBlockIndex;
class CScheduler;

/**
 * Is avalanche enabled by default.
 */
static constexpr bool AVALANCHE_DEFAULT_ENABLED = false;

/**
 * Finalization score.
 */
static constexpr int AVALANCHE_FINALIZATION_SCORE = 128;

/**
 * Maximum item that can be polled at once.
 */
static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL = 16;

/**
 * Avalanche default cooldown in milliseconds.
 */
static constexpr size_t AVALANCHE_DEFAULT_COOLDOWN = 100;

/**
 * How long before we consider that a query timed out.
 */
static constexpr std::chrono::milliseconds AVALANCHE_DEFAULT_QUERY_TIMEOUT{
    10000};

/**
 * How many inflight requests can exist for one item.
 */
static constexpr int AVALANCHE_MAX_INFLIGHT_POLL = 10;

-/**
- * Special NodeId that represent no node.
- */
-static constexpr NodeId NO_NODE = -1;
-
/**
 * Vote history.
 */
struct VoteRecord {
private:
    // confidence's LSB bit is the result. Higher bits are actual confidence
    // score.
    uint16_t confidence = 0;

    // Historical record of votes.
    uint8_t votes = 0;
    // Each bit indicate if the vote is to be considered.
    uint8_t consider = 0;
    // How many in flight requests exists for this element.
    mutable std::atomic inflight{0};

    // Seed for pseudorandom operations.
    const uint32_t seed = 0;

    // Track how many successful votes occured.
    uint32_t successfulVotes = 0;

    // Track the nodes which are part of the quorum.
    std::array nodeFilter{{0, 0, 0, 0, 0, 0, 0, 0}};

public:
    explicit VoteRecord(bool accepted) : confidence(accepted) {}

    /**
     * Copy semantic
     */
    VoteRecord(const VoteRecord &other)
        : confidence(other.confidence), votes(other.votes),
          consider(other.consider), inflight(other.inflight.load()),
          successfulVotes(other.successfulVotes), nodeFilter(other.nodeFilter) {
    }

    /**
     * Vote accounting facilities.
     */
    bool isAccepted() const { return confidence & 0x01; }

    uint16_t getConfidence() const { return confidence >> 1; }
    bool hasFinalized() const {
        return getConfidence() >= AVALANCHE_FINALIZATION_SCORE;
    }

    /**
     * Register a new vote for an item and update confidence accordingly.
     * Returns true if the acceptance or finalization state changed.
     */
    bool registerVote(NodeId nodeid, uint32_t error);

    /**
     * Register that a request is being made regarding that item.
     * The method is made const so that it can be accessed via a read only view
     * of vote_records. It's not a problem as it is made thread safe.
     */
    bool registerPoll() const;

    /**
     * Return if this item is in condition to be polled at the moment.
     */
    bool shouldPoll() const { return inflight < AVALANCHE_MAX_INFLIGHT_POLL; }

    /**
     * Clear `count` inflight requests.
     */
    void clearInflightRequest(uint8_t count = 1) { inflight -= count; }

private:
    /**
     * Add the node to the quorum.
     * Returns true if the node was added, false if the node already was in the
     * quorum.
     */
    bool addNodeToQuorum(NodeId nodeid);
};

class AvalancheBlockUpdate {
    union {
        CBlockIndex *pindex;
        uintptr_t raw;
    };

    static const size_t STATUS_BITS = 2;
    static const uintptr_t MASK = (1 << STATUS_BITS) - 1;

    static_assert(
        alignof(CBlockIndex) >= (1 << STATUS_BITS),
        "CBlockIndex alignement doesn't allow for Status to be stored.");

public:
    enum Status : uint8_t {
        Invalid,
        Rejected,
        Accepted,
        Finalized,
    };

    AvalancheBlockUpdate(CBlockIndex *pindexIn, Status statusIn)
        : pindex(pindexIn) {
        raw |= statusIn;
    }

    Status getStatus() const { return Status(raw & MASK); }

    CBlockIndex *getBlockIndex() {
        return reinterpret_cast(raw & ~MASK);
    }

    const CBlockIndex *getBlockIndex() const {
        return const_cast(this)->getBlockIndex();
    }
};

using BlockVoteMap = std::map;

struct next_request_time {};
struct query_timeout {};

class AvalancheProcessor {
private:
    CConnman *connman;
    std::chrono::milliseconds queryTimeoutDuration;

    /**
     * Blocks to run avalanche on.
     */
    RWCollection vote_records;

    /**
     * Keep track of peers and queries sent.
     */
    std::atomic round;

    using TimePoint = std::chrono::time_point;

    struct Peer {
        NodeId nodeid;
        int64_t score;
        TimePoint nextRequestTime;
        CPubKey pubkey;
    };

    using PeerSet = boost::multi_index_container<
        Peer,
        boost::multi_index::indexed_by<
            // index by nodeid
            boost::multi_index::hashed_unique<
                boost::multi_index::member>,
            // sorted by nextRequestTime
            boost::multi_index::ordered_non_unique<
                boost::multi_index::tag,
                boost::multi_index::member>>>;

    RWCollection peerSet;

    struct Query {
        NodeId nodeid;
        uint64_t round;
        TimePoint timeout;

        /**
         * We declare this as mutable so it can be modified in the multi_index.
         * This is ok because we do not use this field to index in anyway.
         *
         * /!\ Do not use any mutable field as index.
         */
        mutable std::vector invs;
    };

    using QuerySet = boost::multi_index_container<
        Query,
        boost::multi_index::indexed_by<
            // index by nodeid/round
            boost::multi_index::ordered_unique<
                boost::multi_index::composite_key<
                    Query,
                    boost::multi_index::member,
                    boost::multi_index::member>>,
            // sorted by timeout
            boost::multi_index::ordered_non_unique<
                boost::multi_index::tag,
                boost::multi_index::member>>>;

    RWCollection queries;

    /** The key used to sign responses. */
    CKey sessionKey;

    /** Event loop machinery. */
    EventLoop eventLoop;

public:
    explicit AvalancheProcessor(CConnman *connmanIn);
    ~AvalancheProcessor();

    void setQueryTimeoutDuration(std::chrono::milliseconds d) {
        queryTimeoutDuration = d;
    }

    bool addBlockToReconcile(const CBlockIndex *pindex);
    bool isAccepted(const CBlockIndex *pindex) const;
    int getConfidence(const CBlockIndex *pindex) const;

    // TDOD: Refactor the API to remove the dependency on avalanche/protocol.h
    void sendResponse(CNode *pfrom, AvalancheResponse response) const;
    bool registerVotes(NodeId nodeid, const AvalancheResponse &response,
                       std::vector &updates);

    bool addPeer(NodeId nodeid, int64_t score, CPubKey pubkey);
    CPubKey getPubKey(NodeId nodeid) const;
    CPubKey getSessionPubKey() const { return sessionKey.GetPubKey(); }

    bool startEventLoop(CScheduler &scheduler);
    bool stopEventLoop();

private:
    void runEventLoop();
    void clearTimedoutRequests();
    std::vector getInvsForNextPoll(bool forPoll = true);
    NodeId getSuitableNodeToQuery();

    friend struct AvalancheTest;
};

/**
 * Global avalanche instance.
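To make the VoteRecord accounting above concrete, here is a minimal standalone sketch of the bit-packing its accessors describe: the least significant bit of `confidence` holds the current accept/reject result, the remaining bits hold the confidence counter, and an item finalizes once that counter reaches AVALANCHE_FINALIZATION_SCORE. The update rule shown (reset the counter when the result flips, otherwise bump it) is an illustrative assumption, not the actual registerVote implementation.

// Standalone illustration of the VoteRecord packing (sketch only).
#include <cassert>
#include <cstdint>

static constexpr int FINALIZATION_SCORE = 128; // mirrors AVALANCHE_FINALIZATION_SCORE

class PackedVote {
    // LSB = current result, upper 15 bits = confidence counter.
    uint16_t confidence = 0;

public:
    explicit PackedVote(bool accepted) : confidence(accepted) {}

    bool isAccepted() const { return confidence & 0x01; }
    uint16_t getConfidence() const { return confidence >> 1; }
    bool hasFinalized() const { return getConfidence() >= FINALIZATION_SCORE; }

    // Hypothetical update rule: a flipped result resets the counter,
    // an agreeing result increments it until finalization.
    void recordResult(bool accepted) {
        if (accepted != isAccepted()) {
            confidence = accepted; // new result, confidence back to 0
        } else if (!hasFinalized()) {
            confidence += 2; // +1 on the counter, LSB untouched
        }
    }
};

int main() {
    PackedVote v(true);
    for (int i = 0; i < FINALIZATION_SCORE; i++) {
        v.recordResult(true);
    }
    assert(v.isAccepted() && v.hasFinalized());
    return 0;
}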
*/ extern std::unique_ptr g_avalanche; #endif // BITCOIN_AVALANCHE_PROCESSOR_H diff --git a/src/net.h b/src/net.h index 70e2e84c0..05ebfe371 100644 --- a/src/net.h +++ b/src/net.h @@ -1,944 +1,949 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Copyright (c) 2017-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_NET_H #define BITCOIN_NET_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef WIN32 #include #endif class BanMan; class Config; class CNode; class CScheduler; /** Default for -whitelistrelay. */ static const bool DEFAULT_WHITELISTRELAY = true; /** Default for -whitelistforcerelay. */ static const bool DEFAULT_WHITELISTFORCERELAY = false; /** * Time between pings automatically sent out for latency probing and keepalive * (in seconds). */ static const int PING_INTERVAL = 2 * 60; /** * Time after which to disconnect, after waiting for a ping response (or * inactivity). */ static const int TIMEOUT_INTERVAL = 20 * 60; /** Run the feeler connection loop once every 2 minutes or 120 seconds. **/ static const int FEELER_INTERVAL = 120; /** The maximum number of entries in an 'inv' protocol message */ static const unsigned int MAX_INV_SZ = 50000; static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv), "Max protocol message length must be greater than largest " "possible INV message"); /** The maximum number of entries in a locator */ static const unsigned int MAX_LOCATOR_SZ = 101; /** The maximum number of new addresses to accumulate before announcing. */ static const unsigned int MAX_ADDR_TO_SEND = 1000; /** Maximum length of the user agent string in `version` message */ static const unsigned int MAX_SUBVERSION_LENGTH = 256; /** * Maximum number of automatic outgoing nodes over which we'll relay everything * (blocks, tx, addrs, etc) */ static const int MAX_OUTBOUND_FULL_RELAY_CONNECTIONS = 8; /** Maximum number of addnode outgoing nodes */ static const int MAX_ADDNODE_CONNECTIONS = 8; /** Maximum number of block-relay-only outgoing connections */ static const int MAX_BLOCKS_ONLY_CONNECTIONS = 2; /** -listen default */ static const bool DEFAULT_LISTEN = true; /** -upnp default */ #ifdef USE_UPNP static const bool DEFAULT_UPNP = USE_UPNP; #else static const bool DEFAULT_UPNP = false; #endif /** The maximum number of peer connections to maintain. */ static const unsigned int DEFAULT_MAX_PEER_CONNECTIONS = 125; /** The default for -maxuploadtarget. 0 = Unlimited */ static const uint64_t DEFAULT_MAX_UPLOAD_TARGET = 0; /** The default timeframe for -maxuploadtarget. 1 day. */ static const uint64_t MAX_UPLOAD_TIMEFRAME = 60 * 60 * 24; /** Default for blocks only*/ static const bool DEFAULT_BLOCKSONLY = false; /** -peertimeout default */ static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60; static const bool DEFAULT_FORCEDNSSEED = false; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; typedef int64_t NodeId; +/** + * Special NodeId that represent no node. 
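The hunk here moves the NO_NODE sentinel, declared just below, out of avalanche/processor.h and into net.h next to the NodeId typedef, presumably so callers outside the avalanche module can use it. A hedged sketch of how such a sentinel is typically consumed; the selection logic is illustrative, not the actual getSuitableNodeToQuery:

// Illustrative use of a NO_NODE-style sentinel (not actual project code).
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

using NodeId = int64_t;
static constexpr NodeId NO_NODE = -1;

// Hypothetical helper: pick the first peer that is allowed to be queried.
NodeId selectNodeToQuery(const std::vector<NodeId> &peers,
                         const std::vector<bool> &busy) {
    for (size_t i = 0; i < peers.size(); i++) {
        if (!busy[i]) {
            return peers[i];
        }
    }
    // No suitable peer right now; callers must check for NO_NODE.
    return NO_NODE;
}

int main() {
    std::vector<NodeId> peers{7, 12};
    std::vector<bool> busy{true, true};
    if (selectNodeToQuery(peers, busy) == NO_NODE) {
        std::cout << "no node available to poll\n";
    }
    return 0;
}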
+ */ +static constexpr NodeId NO_NODE = -1; + struct AddedNodeInfo { std::string strAddedNode; CService resolvedAddress; bool fConnected; bool fInbound; }; struct CNodeStats; class CClientUIInterface; struct CSerializedNetMsg { CSerializedNetMsg() = default; CSerializedNetMsg(CSerializedNetMsg &&) = default; CSerializedNetMsg &operator=(CSerializedNetMsg &&) = default; // No copying, only moves. CSerializedNetMsg(const CSerializedNetMsg &msg) = delete; CSerializedNetMsg &operator=(const CSerializedNetMsg &) = delete; std::vector data; std::string command; }; class NetEventsInterface; class CConnman { public: enum NumConnections { CONNECTIONS_NONE = 0, CONNECTIONS_IN = (1U << 0), CONNECTIONS_OUT = (1U << 1), CONNECTIONS_ALL = (CONNECTIONS_IN | CONNECTIONS_OUT), }; struct Options { ServiceFlags nLocalServices = NODE_NONE; int nMaxConnections = 0; int m_max_outbound_full_relay = 0; int m_max_outbound_block_relay = 0; int nMaxAddnode = 0; int nMaxFeeler = 0; int nBestHeight = 0; CClientUIInterface *uiInterface = nullptr; NetEventsInterface *m_msgproc = nullptr; BanMan *m_banman = nullptr; unsigned int nSendBufferMaxSize = 0; unsigned int nReceiveFloodSize = 0; uint64_t nMaxOutboundTimeframe = 0; uint64_t nMaxOutboundLimit = 0; int64_t m_peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT; std::vector vSeedNodes; std::vector vWhitelistedRange; std::vector vWhiteBinds; std::vector vBinds; bool m_use_addrman_outgoing = true; std::vector m_specified_outgoing; std::vector m_added_nodes; }; void Init(const Options &connOptions) { nLocalServices = connOptions.nLocalServices; nMaxConnections = connOptions.nMaxConnections; m_max_outbound_full_relay = std::min( connOptions.m_max_outbound_full_relay, connOptions.nMaxConnections); m_max_outbound_block_relay = connOptions.m_max_outbound_block_relay; m_use_addrman_outgoing = connOptions.m_use_addrman_outgoing; nMaxAddnode = connOptions.nMaxAddnode; nMaxFeeler = connOptions.nMaxFeeler; m_max_outbound = m_max_outbound_full_relay + m_max_outbound_block_relay + nMaxFeeler; nBestHeight = connOptions.nBestHeight; clientInterface = connOptions.uiInterface; m_banman = connOptions.m_banman; m_msgproc = connOptions.m_msgproc; nSendBufferMaxSize = connOptions.nSendBufferMaxSize; nReceiveFloodSize = connOptions.nReceiveFloodSize; m_peer_connect_timeout = connOptions.m_peer_connect_timeout; { LOCK(cs_totalBytesSent); nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe; nMaxOutboundLimit = connOptions.nMaxOutboundLimit; } vWhitelistedRange = connOptions.vWhitelistedRange; { LOCK(cs_vAddedNodes); vAddedNodes = connOptions.m_added_nodes; } } CConnman(const Config &configIn, uint64_t seed0, uint64_t seed1); ~CConnman(); bool Start(CScheduler &scheduler, const Options &options); // TODO: Remove NO_THREAD_SAFETY_ANALYSIS. Lock cs_vNodes before reading the // variable vNodes. // // When removing NO_THREAD_SAFETY_ANALYSIS be aware of the following lock // order requirements: // * CheckForStaleTipAndEvictPeers locks cs_main before indirectly calling // GetExtraOutboundCount which locks cs_vNodes. // * ProcessMessage locks cs_main and g_cs_orphans before indirectly calling // ForEachNode which locks cs_vNodes. // // Thus the implicit locking order requirement is: (1) cs_main, (2) // g_cs_orphans, (3) cs_vNodes. 
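The comment above pins down an implicit lock order (cs_main, then g_cs_orphans, then cs_vNodes): as long as every path that needs several of these locks acquires them in that single order, the lock graph stays acyclic and the paths cannot deadlock against each other. A generic sketch of the rule with plain std::mutex stand-ins, not the project's own locking macros:

// Deadlock avoidance by fixed lock ordering (generic sketch).
#include <iostream>
#include <mutex>
#include <thread>

std::mutex cs_main_like;   // must always be taken first
std::mutex cs_vNodes_like; // must always be taken second

void pathA() {
    std::lock_guard<std::mutex> main_lock(cs_main_like);
    std::lock_guard<std::mutex> nodes_lock(cs_vNodes_like);
    std::cout << "path A holds both locks\n";
}

void pathB() {
    // Same order as pathA, even though this path "starts" from the node side;
    // taking cs_vNodes_like first here could deadlock against pathA.
    std::lock_guard<std::mutex> main_lock(cs_main_like);
    std::lock_guard<std::mutex> nodes_lock(cs_vNodes_like);
    std::cout << "path B holds both locks\n";
}

int main() {
    std::thread a(pathA), b(pathB);
    a.join();
    b.join();
    return 0;
}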
void Stop() NO_THREAD_SAFETY_ANALYSIS; void Interrupt(); bool GetNetworkActive() const { return fNetworkActive; }; bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; }; void SetNetworkActive(bool active); void OpenNetworkConnection(const CAddress &addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, bool fOneShot = false, bool fFeeler = false, bool manual_connection = false, bool block_relay_only = false); bool CheckIncomingNonce(uint64_t nonce); bool ForNode(NodeId id, std::function func); void PushMessage(CNode *pnode, CSerializedNetMsg &&msg); template void ForEachNode(Callable &&func) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNode(Callable &&func) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNodeThen(Callable &&pre, CallableAfter &&post) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; template void ForEachNodeThen(Callable &&pre, CallableAfter &&post) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; // Addrman functions size_t GetAddressCount() const; void SetServices(const CService &addr, ServiceFlags nServices); void MarkAddressGood(const CAddress &addr); void AddNewAddresses(const std::vector &vAddr, const CAddress &addrFrom, int64_t nTimePenalty = 0); std::vector GetAddresses(); // This allows temporarily exceeding m_max_outbound_full_relay, with the // goal of finding a peer that is better than all our current peers. void SetTryNewOutboundPeer(bool flag); bool GetTryNewOutboundPeer(); // Return the number of outbound peers we have in excess of our target (eg, // if we previously called SetTryNewOutboundPeer(true), and have since set // to false, we may have extra peers that we wish to disconnect). This may // return a value less than (num_outbound_connections - num_outbound_slots) // in cases where some outbound connections are not yet fully connected, or // not yet fully disconnected. int GetExtraOutboundCount(); bool AddNode(const std::string &node); bool RemoveAddedNode(const std::string &node); std::vector GetAddedNodeInfo(); size_t GetNodeCount(NumConnections num); void GetNodeStats(std::vector &vstats); bool DisconnectNode(const std::string &node); bool DisconnectNode(const CSubNet &subnet); bool DisconnectNode(const CNetAddr &addr); bool DisconnectNode(NodeId id); ServiceFlags GetLocalServices() const; //! set the max outbound target in bytes. void SetMaxOutboundTarget(uint64_t limit); uint64_t GetMaxOutboundTarget(); //! set the timeframe for the max outbound target. void SetMaxOutboundTimeframe(uint64_t timeframe); uint64_t GetMaxOutboundTimeframe(); //! check if the outbound target is reached. If param //! historicalBlockServingLimit is set true, the function will response true //! if the limit for serving historical blocks has been reached. bool OutboundTargetReached(bool historicalBlockServingLimit); //! response the bytes left in the current max outbound cycle in case of no //! limit, it will always response 0 uint64_t GetOutboundTargetBytesLeft(); //! response the time in second left in the current max outbound cycle in //! 
case of no limit, it will always response 0 uint64_t GetMaxOutboundTimeLeftInCycle(); uint64_t GetTotalBytesRecv(); uint64_t GetTotalBytesSent(); void SetBestHeight(int height); int GetBestHeight() const; /** Get a unique deterministic randomizer. */ CSipHasher GetDeterministicRandomizer(uint64_t id) const; unsigned int GetReceiveFloodSize() const; void WakeMessageHandler(); /** * Attempts to obfuscate tx time through exponentially distributed emitting. * Works assuming that a single interval is used. * Variable intervals will result in privacy decrease. */ int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds); private: struct ListenSocket { public: SOCKET socket; inline void AddSocketPermissionFlags(NetPermissionFlags &flags) const { NetPermissions::AddFlag(flags, m_permissions); } ListenSocket(SOCKET socket_, NetPermissionFlags permissions_) : socket(socket_), m_permissions(permissions_) {} private: NetPermissionFlags m_permissions; }; bool BindListenPort(const CService &bindAddr, std::string &strError, NetPermissionFlags permissions); bool Bind(const CService &addr, unsigned int flags, NetPermissionFlags permissions); bool InitBinds(const std::vector &binds, const std::vector &whiteBinds); void ThreadOpenAddedConnections(); void AddOneShot(const std::string &strDest); void ProcessOneShot(); void ThreadOpenConnections(std::vector connect); void ThreadMessageHandler(); void AcceptConnection(const ListenSocket &hListenSocket); void DisconnectNodes(); void NotifyNumConnectionsChanged(); void InactivityCheck(CNode *pnode); bool GenerateSelectSet(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketEvents(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketHandler(); void ThreadSocketHandler(); void ThreadDNSAddressSeed(); uint64_t CalculateKeyedNetGroup(const CAddress &ad) const; CNode *FindNode(const CNetAddr &ip); CNode *FindNode(const CSubNet &subNet); CNode *FindNode(const std::string &addrName); CNode *FindNode(const CService &addr); bool AttemptToEvictConnection(); CNode *ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only); void AddWhitelistPermissionFlags(NetPermissionFlags &flags, const CNetAddr &addr) const; void DeleteNode(CNode *pnode); NodeId GetNewNodeId(); size_t SocketSendData(CNode *pnode) const; void DumpAddresses(); // Network stats void RecordBytesRecv(uint64_t bytes); void RecordBytesSent(uint64_t bytes); // Whether the node should be passed out in ForEach* callbacks static bool NodeFullyConnected(const CNode *pnode); const Config *config; // Network usage totals RecursiveMutex cs_totalBytesRecv; RecursiveMutex cs_totalBytesSent; uint64_t nTotalBytesRecv GUARDED_BY(cs_totalBytesRecv); uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytesSent); // outbound limit & stats uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundCycleStartTime GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundLimit GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundTimeframe GUARDED_BY(cs_totalBytesSent); // P2P timeout in seconds int64_t m_peer_connect_timeout; // Whitelisted ranges. Any node connecting from these is automatically // whitelisted (as well as those connecting to whitelisted binds). 
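CConnman::ForEachNode above takes the vNodes lock and applies a callable to every fully connected node, skipping connections that are still being set up or torn down. A self-contained sketch of the same pattern with simplified types; NodeFullyConnected is reduced to a boolean flag here:

// Minimal stand-in for the ForEachNode pattern (sketch, not CConnman itself).
#include <cstdint>
#include <iostream>
#include <mutex>
#include <vector>

struct FakeNode {
    int64_t id;
    bool fullyConnected; // stands in for NodeFullyConnected(node)
};

class FakeConnman {
    std::mutex cs_vNodes;
    std::vector<FakeNode *> vNodes;

public:
    void addNode(FakeNode *node) {
        std::lock_guard<std::mutex> lock(cs_vNodes);
        vNodes.push_back(node);
    }

    // Hold the lock for the whole iteration, skip half-open connections.
    template <typename Callable> void forEachNode(Callable &&func) {
        std::lock_guard<std::mutex> lock(cs_vNodes);
        for (FakeNode *node : vNodes) {
            if (node->fullyConnected) {
                func(node);
            }
        }
    }
};

int main() {
    FakeNode a{1, true}, b{2, false};
    FakeConnman connman;
    connman.addNode(&a);
    connman.addNode(&b);
    connman.forEachNode(
        [](FakeNode *node) { std::cout << "node " << node->id << "\n"; });
    return 0;
}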
std::vector vWhitelistedRange; unsigned int nSendBufferMaxSize{0}; unsigned int nReceiveFloodSize{0}; std::vector vhListenSocket; std::atomic fNetworkActive{true}; bool fAddressesInitialized{false}; CAddrMan addrman; std::deque vOneShots GUARDED_BY(cs_vOneShots); RecursiveMutex cs_vOneShots; std::vector vAddedNodes GUARDED_BY(cs_vAddedNodes); RecursiveMutex cs_vAddedNodes; std::vector vNodes GUARDED_BY(cs_vNodes); std::list vNodesDisconnected; mutable RecursiveMutex cs_vNodes; std::atomic nLastNodeId{0}; unsigned int nPrevNodeCount{0}; /** Services this instance offers */ ServiceFlags nLocalServices; std::unique_ptr semOutbound; std::unique_ptr semAddnode; int nMaxConnections; // How many full-relay (tx, block, addr) outbound peers we want int m_max_outbound_full_relay; // How many block-relay only outbound peers we want // We do not relay tx or addr messages with these peers int m_max_outbound_block_relay; int nMaxAddnode; int nMaxFeeler; int m_max_outbound; bool m_use_addrman_outgoing; std::atomic nBestHeight; CClientUIInterface *clientInterface; NetEventsInterface *m_msgproc; BanMan *m_banman; /** SipHasher seeds for deterministic randomness */ const uint64_t nSeed0, nSeed1; /** flag for waking the message processor. */ bool fMsgProcWake; std::condition_variable condMsgProc; Mutex mutexMsgProc; std::atomic flagInterruptMsgProc{false}; CThreadInterrupt interruptNet; std::thread threadDNSAddressSeed; std::thread threadSocketHandler; std::thread threadOpenAddedConnections; std::thread threadOpenConnections; std::thread threadMessageHandler; /** * flag for deciding to connect to an extra outbound peer, in excess of * m_max_outbound_full_relay. This takes the place of a feeler connection. */ std::atomic_bool m_try_another_outbound_peer; std::atomic m_next_send_inv_to_incoming{0}; friend struct CConnmanTest; }; void Discover(); void StartMapPort(); void InterruptMapPort(); void StopMapPort(); unsigned short GetListenPort(); /** * Interface for message handling */ class NetEventsInterface { public: virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual bool SendMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual void InitializeNode(const Config &config, CNode *pnode) = 0; virtual void FinalizeNode(const Config &config, NodeId id, bool &update_connection_time) = 0; protected: /** * Protected destructor so that instances can only be deleted by derived * classes. If that restriction is no longer desired, this should be made * public and virtual. 
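The comment above describes a deliberate idiom: NetEventsInterface has a protected, non-virtual destructor, so code that only holds a NetEventsInterface pointer (such as CConnman via m_msgproc) cannot delete the object through the interface; the derived message processor controls its own lifetime. A small standalone sketch of that idiom:

// Sketch of the protected-destructor idiom used by NetEventsInterface.
#include <iostream>

class EventsInterface {
public:
    virtual void processMessages() = 0;

protected:
    // Protected and non-virtual: only derived classes may destroy instances,
    // so holders of an EventsInterface* cannot `delete` through the interface.
    ~EventsInterface() = default;
};

class MessageProcessor : public EventsInterface {
public:
    void processMessages() override { std::cout << "processing\n"; }
};

int main() {
    MessageProcessor processor;
    EventsInterface *iface = &processor;
    iface->processMessages();
    // delete iface;  // would not compile: ~EventsInterface is protected
    return 0;         // `processor` is destroyed as a MessageProcessor
}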
*/ ~NetEventsInterface() = default; }; enum { // unknown LOCAL_NONE, // address a local interface listens on LOCAL_IF, // address explicit bound to LOCAL_BIND, // address reported by UPnP LOCAL_UPNP, // address explicitly specified (-externalip=) LOCAL_MANUAL, LOCAL_MAX }; bool IsPeerAddrLocalGood(CNode *pnode); void AdvertiseLocal(CNode *pnode); /** * Mark a network as reachable or unreachable (no automatic connects to it) * @note Networks are reachable by default */ void SetReachable(enum Network net, bool reachable); /** @returns true if the network is reachable, false otherwise */ bool IsReachable(enum Network net); /** @returns true if the address is in a reachable network, false otherwise */ bool IsReachable(const CNetAddr &addr); bool AddLocal(const CService &addr, int nScore = LOCAL_NONE); bool AddLocal(const CNetAddr &addr, int nScore = LOCAL_NONE); void RemoveLocal(const CService &addr); bool SeenLocal(const CService &addr); bool IsLocal(const CService &addr); bool GetLocal(CService &addr, const CNetAddr *paddrPeer = nullptr); CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices); extern bool fDiscover; extern bool fListen; extern bool g_relay_txes; struct LocalServiceInfo { int nScore; int nPort; }; extern RecursiveMutex cs_mapLocalHost; extern std::map mapLocalHost GUARDED_BY(cs_mapLocalHost); extern const std::string NET_MESSAGE_COMMAND_OTHER; // Command, total bytes typedef std::map mapMsgCmdSize; /** * POD that contains various stats about a node. * Usually constructed from CConman::GetNodeStats. Stats are filled from the * node using CNode::copyStats. */ struct CNodeStats { NodeId nodeid; ServiceFlags nServices; bool fRelayTxes; int64_t nLastSend; int64_t nLastRecv; int64_t nTimeConnected; int64_t nTimeOffset; std::string addrName; int nVersion; std::string cleanSubVer; bool fInbound; bool m_manual_connection; int nStartingHeight; uint64_t nSendBytes; mapMsgCmdSize mapSendBytesPerMsgCmd; uint64_t nRecvBytes; mapMsgCmdSize mapRecvBytesPerMsgCmd; NetPermissionFlags m_permissionFlags; bool m_legacyWhitelisted; int64_t m_ping_usec; int64_t m_ping_wait_usec; int64_t m_min_ping_usec; Amount minFeeFilter; // Our address, as reported by the peer std::string addrLocal; // Address of this peer CAddress addr; // Bind address of our side of the connection CAddress addrBind; }; class CNetMessage { private: mutable CHash256 hasher; mutable uint256 data_hash; public: // Parsing header (false) or data (true) bool in_data; // Partially received header. CDataStream hdrbuf; // Complete header. CMessageHeader hdr; uint32_t nHdrPos; // Received message data. CDataStream vRecv; uint32_t nDataPos; // Time (in microseconds) of message receipt. int64_t nTime; CNetMessage(const CMessageHeader::MessageMagic &pchMessageStartIn, int nTypeIn, int nVersionIn) : hdrbuf(nTypeIn, nVersionIn), hdr(pchMessageStartIn), vRecv(nTypeIn, nVersionIn) { hdrbuf.resize(24); in_data = false; nHdrPos = 0; nDataPos = 0; nTime = 0; } bool complete() const { if (!in_data) { return false; } return (hdr.nMessageSize == nDataPos); } const uint256 &GetMessageHash() const; void SetVersion(int nVersionIn) { hdrbuf.SetVersion(nVersionIn); vRecv.SetVersion(nVersionIn); } int readHeader(const Config &config, const char *pch, uint32_t nBytes); int readData(const char *pch, uint32_t nBytes); }; /** Information about a peer */ class CNode { friend class CConnman; public: // socket std::atomic nServices{NODE_NONE}; SOCKET hSocket GUARDED_BY(cs_hSocket); // Total size of all vSendMsg entries. 
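CNetMessage above is filled in two phases: readHeader accumulates the fixed-size header (hdrbuf is sized to 24 bytes), then readData accumulates hdr.nMessageSize bytes of payload, and complete() only reports true once the payload is fully received. A simplified, self-contained sketch of that state machine, using a plain 4-byte length prefix instead of the real CMessageHeader layout (an assumption made to keep the example short):

// Two-phase message accumulation, modeled on CNetMessage (simplified framing).
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

class NetMessage {
    bool in_data = false;         // parsing header (false) or payload (true)
    std::vector<uint8_t> hdrbuf;  // partially received header
    std::vector<uint8_t> payload; // received message data
    uint32_t messageSize = 0;     // taken from the header once complete

    static constexpr size_t HEADER_SIZE = 4; // the real header is 24 bytes

public:
    bool complete() const { return in_data && payload.size() == messageSize; }

    // Consume bytes from the wire; returns how many were used.
    size_t read(const uint8_t *data, size_t len) {
        size_t used = 0;
        if (!in_data) {
            size_t want = std::min(len, HEADER_SIZE - hdrbuf.size());
            hdrbuf.insert(hdrbuf.end(), data, data + want);
            used += want;
            if (hdrbuf.size() == HEADER_SIZE) {
                messageSize = hdrbuf[0] | (hdrbuf[1] << 8) |
                              (hdrbuf[2] << 16) | (uint32_t(hdrbuf[3]) << 24);
                in_data = true;
            }
        }
        if (in_data && used < len) {
            size_t want = std::min(len - used, messageSize - payload.size());
            payload.insert(payload.end(), data + used, data + used + want);
            used += want;
        }
        return used;
    }
};

int main() {
    // 4-byte length (2), followed by 2 payload bytes.
    const uint8_t wire[] = {0x02, 0x00, 0x00, 0x00, 0xab, 0xcd};
    NetMessage msg;
    size_t used = msg.read(wire, sizeof(wire));
    assert(used == sizeof(wire) && msg.complete());
    return 0;
}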
size_t nSendSize{0}; // Offset inside the first vSendMsg already sent. size_t nSendOffset{0}; uint64_t nSendBytes GUARDED_BY(cs_vSend){0}; std::deque> vSendMsg GUARDED_BY(cs_vSend); RecursiveMutex cs_vSend; RecursiveMutex cs_hSocket; RecursiveMutex cs_vRecv; RecursiveMutex cs_vProcessMsg; std::list vProcessMsg GUARDED_BY(cs_vProcessMsg); size_t nProcessQueueSize{0}; RecursiveMutex cs_sendProcessing; std::deque vRecvGetData; uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0}; std::atomic nRecvVersion{INIT_PROTO_VERSION}; std::atomic nLastSend{0}; std::atomic nLastRecv{0}; const int64_t nTimeConnected; std::atomic nTimeOffset{0}; // Address of this peer const CAddress addr; // Bind address of our side of the connection const CAddress addrBind; std::atomic nVersion{0}; RecursiveMutex cs_SubVer; /** * cleanSubVer is a sanitized string of the user agent byte array we read * from the wire. This cleaned string can safely be logged or displayed. */ std::string cleanSubVer GUARDED_BY(cs_SubVer){}; // This peer is preferred for eviction. bool m_prefer_evict{false}; bool HasPermission(NetPermissionFlags permission) const { return NetPermissions::HasFlag(m_permissionFlags, permission); } // This boolean is unusued in actual processing, only present for backward // compatibility at RPC/QT level bool m_legacyWhitelisted{false}; // If true this node is being used as a short lived feeler. bool fFeeler{false}; bool fOneShot{false}; bool m_manual_connection{false}; // set by version message bool fClient{false}; // after BIP159, set by version message bool m_limited_node{false}; const bool fInbound; std::atomic_bool fSuccessfullyConnected{false}; // Setting fDisconnect to true will cause the node to be disconnected the // next time DisconnectNodes() runs std::atomic_bool fDisconnect{false}; bool fSentAddr{false}; CSemaphoreGrant grantOutbound; std::atomic nRefCount{0}; const uint64_t nKeyedNetGroup; std::atomic_bool fPauseRecv{false}; std::atomic_bool fPauseSend{false}; protected: mapMsgCmdSize mapSendBytesPerMsgCmd; mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); public: BlockHash hashContinue; std::atomic nStartingHeight{-1}; // flood relay std::vector vAddrToSend; CRollingBloomFilter addrKnown; bool fGetAddr{false}; int64_t nNextAddrSend GUARDED_BY(cs_sendProcessing){0}; int64_t nNextLocalAddrSend GUARDED_BY(cs_sendProcessing){0}; const bool m_addr_relay_peer; bool IsAddrRelayPeer() const { return m_addr_relay_peer; } // List of block ids we still have to announce. // There is no final sorting before sending, as they are always sent // immediately and in the order requested. std::vector vInventoryBlockToSend GUARDED_BY(cs_inventory); RecursiveMutex cs_inventory; struct TxRelay { TxRelay() { pfilter = std::make_unique(); } mutable RecursiveMutex cs_filter; // We use fRelayTxes for two purposes - // a) it allows us to not relay tx invs before receiving the peer's // version message. // b) the peer may tell us in its version message that we should not // relay tx invs unless it loads a bloom filter. bool fRelayTxes GUARDED_BY(cs_filter){false}; std::unique_ptr pfilter PT_GUARDED_BY(cs_filter) GUARDED_BY(cs_filter); mutable RecursiveMutex cs_tx_inventory; CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_tx_inventory){ 50000, 0.000001}; // Set of transaction ids we still have to announce. // They are sorted by the mempool before relay, so the order is not // important. 
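The TxRelay fields above form a per-peer announcement queue: filterInventoryKnown remembers what the peer already knows (probabilistically, via a rolling bloom filter), and setInventoryTxToSend holds what still has to be announced. A reduced sketch of that dedup pattern, using an exact std::set in place of the rolling bloom filter:

// Per-peer tx announcement queue: only queue what the peer does not know yet.
// Sketch only; the real code uses CRollingBloomFilter, not an exact set.
#include <cstdint>
#include <iostream>
#include <set>

using TxId = uint64_t; // stand-in for the real 256-bit txid

class PeerTxRelay {
    std::set<TxId> inventoryKnown;  // ~ filterInventoryKnown
    std::set<TxId> inventoryToSend; // ~ setInventoryTxToSend

public:
    // ~ PushInventory for MSG_TX: skip txids the peer already knows about.
    void pushInventory(TxId txid) {
        if (!inventoryKnown.count(txid)) {
            inventoryToSend.insert(txid);
        }
    }

    // ~ AddInventoryKnown / the send path: once announced, remember it.
    void markKnown(TxId txid) {
        inventoryKnown.insert(txid);
        inventoryToSend.erase(txid);
    }

    size_t pending() const { return inventoryToSend.size(); }
};

int main() {
    PeerTxRelay relay;
    relay.pushInventory(42);
    relay.markKnown(42);
    relay.pushInventory(42); // already known: not queued again
    std::cout << "pending announcements: " << relay.pending() << "\n"; // 0
    return 0;
}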
std::set setInventoryTxToSend; // Used for BIP35 mempool sending bool fSendMempool GUARDED_BY(cs_tx_inventory){false}; // Last time a "MEMPOOL" request was serviced. std::atomic m_last_mempool_req{ std::chrono::seconds{0}}; int64_t nNextInvSend{0}; RecursiveMutex cs_feeFilter; // Minimum fee rate with which to filter inv's to this node Amount minFeeFilter GUARDED_BY(cs_feeFilter){Amount::zero()}; Amount lastSentFeeFilter{Amount::zero()}; int64_t nextSendTimeFeeFilter{0}; }; // m_tx_relay == nullptr if we're not relaying transactions with this peer std::unique_ptr m_tx_relay; // Used for headers announcements - unfiltered blocks to relay std::vector vBlockHashesToAnnounce GUARDED_BY(cs_inventory); // Block and TXN accept times std::atomic nLastBlockTime{0}; std::atomic nLastTXTime{0}; // Ping time measurement: // The pong reply we're expecting, or 0 if no pong expected. std::atomic nPingNonceSent{0}; // Time (in usec) the last ping was sent, or 0 if no ping was ever sent. std::atomic nPingUsecStart{0}; // Last measured round-trip time. std::atomic nPingUsecTime{0}; // Best measured round-trip time. std::atomic nMinPingUsecTime{std::numeric_limits::max()}; // Whether a ping is requested. std::atomic fPingQueued{false}; std::set orphan_work_set; CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn = "", bool fInboundIn = false, bool block_relay_only = false); ~CNode(); CNode(const CNode &) = delete; CNode &operator=(const CNode &) = delete; private: const NodeId id; const uint64_t nLocalHostNonce; // Services offered to this peer const ServiceFlags nLocalServices; const int nMyStartingHeight; int nSendVersion{0}; NetPermissionFlags m_permissionFlags{PF_NONE}; // Used only by SocketHandler thread std::list vRecvMsg; mutable RecursiveMutex cs_addrName; std::string addrName GUARDED_BY(cs_addrName); // Our address, as reported by the peer CService addrLocal GUARDED_BY(cs_addrLocal); mutable RecursiveMutex cs_addrLocal; public: NodeId GetId() const { return id; } uint64_t GetLocalNonce() const { return nLocalHostNonce; } int GetMyStartingHeight() const { return nMyStartingHeight; } int GetRefCount() const { assert(nRefCount >= 0); return nRefCount; } bool ReceiveMsgBytes(const Config &config, const char *pch, uint32_t nBytes, bool &complete); void SetRecvVersion(int nVersionIn) { nRecvVersion = nVersionIn; } int GetRecvVersion() const { return nRecvVersion; } void SetSendVersion(int nVersionIn); int GetSendVersion() const; CService GetAddrLocal() const; //! May not be called more than once void SetAddrLocal(const CService &addrLocalIn); CNode *AddRef() { nRefCount++; return this; } void Release() { nRefCount--; } void AddAddressKnown(const CAddress &_addr) { addrKnown.insert(_addr.GetKey()); } void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand) { // Known checking here is only to save space from duplicates. // SendMessages will filter it again for knowns that were added // after addresses were pushed. 
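PushAddress, whose body follows, caps the per-peer address queue at MAX_ADDR_TO_SEND; once the queue is full a random existing entry is overwritten instead of growing the vector. A standalone sketch of that bounded-buffer behaviour, with std::mt19937 standing in for FastRandomContext:

// Bounded address queue with random replacement once full (sketch).
#include <cassert>
#include <random>
#include <string>
#include <vector>

static const unsigned int MAX_ADDR_TO_SEND = 1000;

class AddrQueue {
    std::vector<std::string> vAddrToSend;
    std::mt19937 rng{42}; // stand-in for FastRandomContext

public:
    void pushAddress(const std::string &addr) {
        if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) {
            // Queue full: overwrite a uniformly random existing slot.
            std::uniform_int_distribution<size_t> pick(0, vAddrToSend.size() - 1);
            vAddrToSend[pick(rng)] = addr;
        } else {
            vAddrToSend.push_back(addr);
        }
    }

    size_t size() const { return vAddrToSend.size(); }
};

int main() {
    AddrQueue queue;
    for (int i = 0; i < 2000; i++) {
        queue.pushAddress("peer-" + std::to_string(i));
    }
    // Never grows past the cap; later addresses displace random earlier ones.
    assert(queue.size() == MAX_ADDR_TO_SEND);
    return 0;
}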
if (_addr.IsValid() && !addrKnown.contains(_addr.GetKey())) { if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) { vAddrToSend[insecure_rand.randrange(vAddrToSend.size())] = _addr; } else { vAddrToSend.push_back(_addr); } } } void AddInventoryKnown(const CInv &inv) { if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_tx_inventory); m_tx_relay->filterInventoryKnown.insert(inv.hash); } } void PushInventory(const CInv &inv) { if (inv.type == MSG_TX && m_tx_relay != nullptr) { const TxId txid(inv.hash); LOCK(m_tx_relay->cs_tx_inventory); if (!m_tx_relay->filterInventoryKnown.contains(txid)) { m_tx_relay->setInventoryTxToSend.insert(txid); } } else if (inv.type == MSG_BLOCK) { const BlockHash hash(inv.hash); LOCK(cs_inventory); vInventoryBlockToSend.push_back(hash); } } void PushBlockHash(const BlockHash &hash) { LOCK(cs_inventory); vBlockHashesToAnnounce.push_back(hash); } void CloseSocketDisconnect(); void copyStats(CNodeStats &stats); ServiceFlags GetLocalServices() const { return nLocalServices; } std::string GetAddrName() const; //! Sets the addrName only if it was not previously set void MaybeSetAddrName(const std::string &addrNameIn); }; /** * Return a timestamp in the future (in microseconds) for exponentially * distributed events. */ int64_t PoissonNextSend(int64_t now, int average_interval_seconds); std::string getSubVersionEB(uint64_t MaxBlockSize); std::string userAgent(const Config &config); #endif // BITCOIN_NET_H
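PoissonNextSend and PoissonNextSendInbound above return a future timestamp drawn so that send events are exponentially distributed around an average interval, which makes the timing of relays harder to fingerprint. A hedged sketch of that idea using <random>; the real functions rely on the project's own deterministic randomness and a different sampling expression:

// Exponentially distributed "next send" times (illustrative, not the real formula).
#include <cstdint>
#include <iostream>
#include <random>

// Return a timestamp (in microseconds) in the future, on average
// `average_interval_seconds` after `now_usec`, with exponentially
// distributed gaps between events.
int64_t poissonNextSend(int64_t now_usec, int average_interval_seconds) {
    static std::mt19937_64 rng{std::random_device{}()};
    std::exponential_distribution<double> delay(1.0 / average_interval_seconds);
    return now_usec + static_cast<int64_t>(delay(rng) * 1'000'000);
}

int main() {
    const int64_t now = 0;
    // Sample a few next-send times with a 5 second average interval.
    for (int i = 0; i < 3; i++) {
        std::cout << poissonNextSend(now, 5) << " usec\n";
    }
    return 0;
}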