diff --git a/src/avalanche/node.h b/src/avalanche/node.h index 506b0d386..5ef263a94 100644 --- a/src/avalanche/node.h +++ b/src/avalanche/node.h @@ -1,33 +1,33 @@ // Copyright (c) 2018-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_AVALANCHE_NODE_H #define BITCOIN_AVALANCHE_NODE_H -#include // For NodeId +#include #include #include #include using PeerId = uint32_t; static constexpr PeerId NO_PEER = -1; using TimePoint = std::chrono::time_point; namespace avalanche { struct Node { NodeId nodeid; PeerId peerid; TimePoint nextRequestTime; Node(NodeId nodeid_, PeerId peerid_) : nodeid(nodeid_), peerid(peerid_), nextRequestTime(std::chrono::steady_clock::now()) {} }; } // namespace avalanche #endif // BITCOIN_AVALANCHE_NODE_H diff --git a/src/avalanche/peermanager.h b/src/avalanche/peermanager.h index fa93d5e13..638aeefdb 100644 --- a/src/avalanche/peermanager.h +++ b/src/avalanche/peermanager.h @@ -1,293 +1,293 @@ // Copyright (c) 2020 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_AVALANCHE_PEERMANAGER_H #define BITCOIN_AVALANCHE_PEERMANAGER_H #include #include #include #include -#include #include #include +#include #include #include #include #include #include #include #include #include #include #include namespace avalanche { /** * Maximum number of stakes in the orphanProofs. * Benchmarking on a consumer grade computer shows that 10000 stakes can be * verified in less than 1 second. */ static constexpr size_t AVALANCHE_ORPHANPROOFPOOL_SIZE = 10000; class Delegation; namespace { struct TestPeerManager; } struct Slot { private: uint64_t start; uint32_t score; PeerId peerid; public: Slot(uint64_t startIn, uint32_t scoreIn, PeerId peeridIn) : start(startIn), score(scoreIn), peerid(peeridIn) {} Slot withStart(uint64_t startIn) const { return Slot(startIn, score, peerid); } Slot withScore(uint64_t scoreIn) const { return Slot(start, scoreIn, peerid); } Slot withPeerId(PeerId peeridIn) const { return Slot(start, score, peeridIn); } uint64_t getStart() const { return start; } uint64_t getStop() const { return start + score; } uint32_t getScore() const { return score; } PeerId getPeerId() const { return peerid; } bool contains(uint64_t slot) const { return getStart() <= slot && slot < getStop(); } bool precedes(uint64_t slot) const { return slot >= getStop(); } bool follows(uint64_t slot) const { return getStart() > slot; } }; struct Peer { PeerId peerid; uint32_t index = -1; uint32_t node_count = 0; std::shared_ptr proof; // The network stack uses timestamp in seconds, so we oblige. 
std::chrono::seconds registration_time; Peer(PeerId peerid_, std::shared_ptr proof_) : peerid(peerid_), proof(std::move(proof_)), registration_time(GetTime()) {} const ProofId &getProofId() const { return proof->getId(); } uint32_t getScore() const { return proof->getScore(); } }; struct proof_index { using result_type = ProofId; result_type operator()(const Peer &p) const { return p.proof->getId(); } }; struct next_request_time {}; struct PendingNode { ProofId proofid; NodeId nodeid; PendingNode(ProofId proofid_, NodeId nodeid_) : proofid(proofid_), nodeid(nodeid_){}; }; struct by_proofid; struct by_nodeid; namespace bmi = boost::multi_index; class PeerManager { std::vector slots; uint64_t slotCount = 0; uint64_t fragmentation = 0; /** * Several nodes can make an avalanche peer. In this case, all nodes are * considered interchangeable parts of the same peer. */ using PeerSet = boost::multi_index_container< Peer, bmi::indexed_by< // index by peerid bmi::hashed_unique>, // index by proof bmi::hashed_unique, proof_index, SaltedProofIdHasher>>>; PeerId nextPeerId = 0; PeerSet peers; std::unordered_map utxos; using NodeSet = boost::multi_index_container< Node, bmi::indexed_by< // index by nodeid bmi::hashed_unique>, // sorted by peerid/nextRequestTime bmi::ordered_non_unique< bmi::tag, bmi::composite_key< Node, bmi::member, bmi::member>>>>; NodeSet nodes; using PendingNodeSet = boost::multi_index_container< PendingNode, bmi::indexed_by< // index by proofid bmi::hashed_non_unique< bmi::tag, bmi::member, SaltedProofIdHasher>, // index by nodeid bmi::hashed_unique< bmi::tag, bmi::member>>>; PendingNodeSet pendingNodes; static constexpr int SELECT_PEER_MAX_RETRY = 3; static constexpr int SELECT_NODE_MAX_RETRY = 3; /** * Tracks proof which for which the UTXO are unavailable. */ OrphanProofPool orphanProofs{AVALANCHE_ORPHANPROOFPOOL_SIZE}; /** * Track proof ids to broadcast */ std::unordered_set m_unbroadcast_proofids; public: /** * Node API. */ bool addNode(NodeId nodeid, const ProofId &proofid); bool removeNode(NodeId nodeid); // Update when a node is to be polled next. bool updateNextRequestTime(NodeId nodeid, TimePoint timeout); // Randomly select a node to poll. NodeId selectNode(); template bool forNode(NodeId nodeid, Callable &&func) const { auto it = nodes.find(nodeid); return it != nodes.end() && func(*it); } template void forEachNode(const Peer &peer, Callable &&func) const { auto &nview = nodes.get(); auto range = nview.equal_range(peer.peerid); for (auto it = range.first; it != range.second; ++it) { func(*it); } } /** * Proof and Peer related API. */ bool registerProof(const std::shared_ptr &proof); bool exists(const ProofId &proofid) const { return getProof(proofid) != nullptr; } template bool forPeer(const ProofId &proofid, Callable &&func) const { auto &pview = peers.get(); auto it = pview.find(proofid); return it != pview.end() && func(*it); } template void forEachPeer(Callable &&func) const { for (const auto &p : peers) { func(p); } } /** * Update the peer set when a new block is connected. */ void updatedBlockTip(); /** * Proof broadcast API. */ void addUnbroadcastProof(const ProofId &proofid); void removeUnbroadcastProof(const ProofId &proofid); auto getUnbroadcastProofs() const { return m_unbroadcast_proofids; } /**************************************************** * Functions which are public for testing purposes. * ****************************************************/ /** * Provide the PeerId associated with the given proof. If the peer does not * exist, then it is created. 
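// [Editor's note] Illustrative sketch only: how a stake-weighted lookup over
// the Slot vector kept by PeerManager could work. Each Slot covers
// [start, start + score), so drawing a uniform slot number and finding its
// owner picks a peer with probability proportional to its proof score.
// `SimpleSlot` and `pickPeer` are simplified stand-ins, not the project's
// selectPeer()/selectPeerImpl() implementation.
#include <algorithm>
#include <cstdint>
#include <random>
#include <vector>

namespace sketch {
using PeerId = uint32_t;
static constexpr PeerId NO_PEER = ~PeerId(0);

struct SimpleSlot {
    uint64_t start;  // first slot owned by this peer
    uint32_t score;  // proof score == number of slots owned
    PeerId peerid;
    uint64_t getStop() const { return start + score; }
    bool contains(uint64_t s) const { return start <= s && s < getStop(); }
};

// Draw a slot uniformly in [0, slotCount) and binary search for its owner.
PeerId pickPeer(const std::vector<SimpleSlot> &slots, uint64_t slotCount,
                std::mt19937_64 &rng) {
    if (slots.empty() || slotCount == 0) {
        return NO_PEER;
    }
    const uint64_t target =
        std::uniform_int_distribution<uint64_t>(0, slotCount - 1)(rng);
    // First slot whose start is > target, then step back one.
    auto it = std::upper_bound(
        slots.begin(), slots.end(), target,
        [](uint64_t v, const SimpleSlot &s) { return v < s.start; });
    if (it == slots.begin()) {
        return NO_PEER;
    }
    --it;
    // The hit may land in a hole left by a removed peer (fragmentation).
    return it->contains(target) ? it->peerid : NO_PEER;
}
} // namespace sketch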
*/ PeerId getPeerId(const std::shared_ptr &proof); /** * Remove an existing peer. */ bool removePeer(const PeerId peerid); /** * Randomly select a peer to poll. */ PeerId selectPeer() const; /** * Trigger maintenance of internal data structures. * Returns how much slot space was saved after compaction. */ uint64_t compact(); /** * Perform consistency check on internal data structures. */ bool verify() const; // Accessors. uint64_t getSlotCount() const { return slotCount; } uint64_t getFragmentation() const { return fragmentation; } std::shared_ptr getProof(const ProofId &proofid) const; bool isOrphan(const ProofId &id) const; std::shared_ptr getOrphan(const ProofId &id) const; private: PeerSet::iterator fetchOrCreatePeer(const std::shared_ptr &proof); bool addOrUpdateNode(const PeerSet::iterator &it, NodeId nodeid); bool addNodeToPeer(const PeerSet::iterator &it); bool removeNodeFromPeer(const PeerSet::iterator &it, uint32_t count = 1); friend struct ::avalanche::TestPeerManager; }; /** * This is an internal method that is exposed for testing purposes. */ PeerId selectPeerImpl(const std::vector &slots, const uint64_t slot, const uint64_t max); } // namespace avalanche #endif // BITCOIN_AVALANCHE_PEERMANAGER_H diff --git a/src/avalanche/processor.h b/src/avalanche/processor.h index 4c57d0409..d5748c3fd 100644 --- a/src/avalanche/processor.h +++ b/src/avalanche/processor.h @@ -1,309 +1,311 @@ // Copyright (c) 2018-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_AVALANCHE_PROCESSOR_H #define BITCOIN_AVALANCHE_PROCESSOR_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include class ArgsManager; -class Config; class CBlockIndex; +class CConnman; +class CNode; class CScheduler; +class Config; class PeerManager; struct bilingual_str; /** * Finalization score. */ static constexpr int AVALANCHE_FINALIZATION_SCORE = 128; /** * Maximum item that can be polled at once. */ static constexpr size_t AVALANCHE_MAX_ELEMENT_POLL = 16; /** * How long before we consider that a query timed out. */ static constexpr std::chrono::milliseconds AVALANCHE_DEFAULT_QUERY_TIMEOUT{ 10000}; /** * How many inflight requests can exist for one item. */ static constexpr int AVALANCHE_MAX_INFLIGHT_POLL = 10; namespace avalanche { class Delegation; class PeerManager; class Proof; /** * Vote history. */ struct VoteRecord { private: // confidence's LSB bit is the result. Higher bits are actual confidence // score. uint16_t confidence = 0; // Historical record of votes. uint8_t votes = 0; // Each bit indicate if the vote is to be considered. uint8_t consider = 0; // How many in flight requests exists for this element. mutable std::atomic inflight{0}; // Seed for pseudorandom operations. const uint32_t seed = 0; // Track how many successful votes occured. uint32_t successfulVotes = 0; // Track the nodes which are part of the quorum. std::array nodeFilter{{0, 0, 0, 0, 0, 0, 0, 0}}; public: explicit VoteRecord(bool accepted) : confidence(accepted) {} /** * Copy semantic */ VoteRecord(const VoteRecord &other) : confidence(other.confidence), votes(other.votes), consider(other.consider), inflight(other.inflight.load()), successfulVotes(other.successfulVotes), nodeFilter(other.nodeFilter) { } /** * Vote accounting facilities. 
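// [Editor's note] Sketch of the confidence encoding documented in VoteRecord
// above: the least significant bit carries the current yes/no result and the
// remaining bits carry the confidence counter. The update rule below (reset
// on a flip, otherwise count agreeing votes up to the finalization score) is
// a simplification for illustration; the real registerVote() in processor.cpp
// also tracks the vote history, the "consider" mask and the node quorum.
#include <cstdint>

namespace sketch {
static constexpr int FINALIZATION_SCORE = 128; // AVALANCHE_FINALIZATION_SCORE

struct TinyVoteRecord {
    uint16_t confidence = 0;

    bool isAccepted() const { return confidence & 0x01; }
    uint16_t getConfidence() const { return confidence >> 1; }
    bool hasFinalized() const { return getConfidence() >= FINALIZATION_SCORE; }

    // Returns true if the acceptance or finalization state changed.
    bool registerVote(bool accepted) {
        if (isAccepted() != accepted) {
            // The vote disagrees with the current state: flip the result bit
            // and restart the confidence counter from zero.
            confidence = accepted ? 0x01 : 0x00;
            return true;
        }
        if (hasFinalized()) {
            return false; // Already finalized, nothing left to do.
        }
        confidence += 2; // +1 on the counter stored in the upper bits.
        return hasFinalized();
    }
};
} // namespace sketch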
*/ bool isAccepted() const { return confidence & 0x01; } uint16_t getConfidence() const { return confidence >> 1; } bool hasFinalized() const { return getConfidence() >= AVALANCHE_FINALIZATION_SCORE; } /** * Register a new vote for an item and update confidence accordingly. * Returns true if the acceptance or finalization state changed. */ bool registerVote(NodeId nodeid, uint32_t error); /** * Register that a request is being made regarding that item. * The method is made const so that it can be accessed via a read only view * of vote_records. It's not a problem as it is made thread safe. */ bool registerPoll() const; /** * Return if this item is in condition to be polled at the moment. */ bool shouldPoll() const { return inflight < AVALANCHE_MAX_INFLIGHT_POLL; } /** * Clear `count` inflight requests. */ void clearInflightRequest(uint8_t count = 1) { inflight -= count; } private: /** * Add the node to the quorum. * Returns true if the node was added, false if the node already was in the * quorum. */ bool addNodeToQuorum(NodeId nodeid); }; class BlockUpdate { union { CBlockIndex *pindex; uintptr_t raw; }; static const size_t STATUS_BITS = 2; static const uintptr_t MASK = (1 << STATUS_BITS) - 1; static_assert( alignof(CBlockIndex) >= (1 << STATUS_BITS), "CBlockIndex alignement doesn't allow for Status to be stored."); public: enum Status : uint8_t { Invalid, Rejected, Accepted, Finalized, }; BlockUpdate(CBlockIndex *pindexIn, Status statusIn) : pindex(pindexIn) { raw |= statusIn; } Status getStatus() const { return Status(raw & MASK); } CBlockIndex *getBlockIndex() { return reinterpret_cast(raw & ~MASK); } const CBlockIndex *getBlockIndex() const { return const_cast(this)->getBlockIndex(); } }; using BlockVoteMap = std::map; struct query_timeout {}; namespace { struct AvalancheTest; } class Processor { CConnman *connman; std::chrono::milliseconds queryTimeoutDuration; /** * Blocks to run avalanche on. */ RWCollection vote_records; /** * Keep track of peers and queries sent. */ std::atomic round; /** * Keep track of the peers and associated infos. */ mutable Mutex cs_peerManager; std::unique_ptr peerManager GUARDED_BY(cs_peerManager); struct Query { NodeId nodeid; uint64_t round; TimePoint timeout; /** * We declare this as mutable so it can be modified in the multi_index. * This is ok because we do not use this field to index in anyway. * * /!\ Do not use any mutable field as index. */ mutable std::vector invs; }; using QuerySet = boost::multi_index_container< Query, boost::multi_index::indexed_by< // index by nodeid/round boost::multi_index::hashed_unique, boost::multi_index::member>>, // sorted by timeout boost::multi_index::ordered_non_unique< boost::multi_index::tag, boost::multi_index::member>>>; RWCollection queries; /** Data required to participate. */ struct PeerData; std::unique_ptr peerData; CKey sessionKey; /** Event loop machinery. */ EventLoop eventLoop; /** Registered interfaces::Chain::Notifications handler. 
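// [Editor's note] Illustrative sketch of the pointer-tagging trick used by
// BlockUpdate earlier in this file: because CBlockIndex is aligned to at
// least 4 bytes, the two low bits of its address are always zero and can
// store the 2-bit Status. `Dummy` and `TaggedPtr` are stand-ins; the masks
// mirror the STATUS_BITS/MASK members above.
#include <cassert>
#include <cstdint>

namespace sketch {
struct alignas(4) Dummy {
    int value;
};

enum class Status : uint8_t { Invalid, Rejected, Accepted, Finalized };

class TaggedPtr {
    uintptr_t raw;
    static constexpr uintptr_t MASK = 0x3; // (1 << 2) - 1

public:
    TaggedPtr(Dummy *p, Status s)
        : raw(reinterpret_cast<uintptr_t>(p) | uintptr_t(s)) {
        // The alignment guarantee is what makes the low bits available.
        assert((reinterpret_cast<uintptr_t>(p) & MASK) == 0);
    }
    Status getStatus() const { return Status(raw & MASK); }
    Dummy *getPtr() const { return reinterpret_cast<Dummy *>(raw & ~MASK); }
};
} // namespace sketch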
*/ class NotificationsHandler; std::unique_ptr chainNotificationsHandler; Processor(interfaces::Chain &chain, CConnman *connmanIn, std::unique_ptr peerDataIn, CKey sessionKeyIn); public: ~Processor(); static std::unique_ptr MakeProcessor(const ArgsManager &argsman, interfaces::Chain &chain, CConnman *connman, bilingual_str &error); void setQueryTimeoutDuration(std::chrono::milliseconds d) { queryTimeoutDuration = d; } bool addBlockToReconcile(const CBlockIndex *pindex); bool isAccepted(const CBlockIndex *pindex) const; int getConfidence(const CBlockIndex *pindex) const; // TODO: Refactor the API to remove the dependency on avalanche/protocol.h void sendResponse(CNode *pfrom, Response response) const; bool registerVotes(NodeId nodeid, const Response &response, std::vector &updates, int &banscore, std::string &error); template auto withPeerManager(Callable &&func) const { LOCK(cs_peerManager); return func(*peerManager); } CPubKey getSessionPubKey() const; bool sendHello(CNode *pfrom) const; std::shared_ptr getLocalProof() const; /* * Return whether the avalanche service flag should be set. */ bool isAvalancheServiceAvailable() { return !!peerData; } bool startEventLoop(CScheduler &scheduler); bool stopEventLoop(); private: void runEventLoop(); void clearTimedoutRequests(); std::vector getInvsForNextPoll(bool forPoll = true); NodeId getSuitableNodeToQuery(); /** * Build and return the challenge whose signature is included in the * AVAHELLO message that we send to a peer. */ uint256 buildLocalSighash(CNode *pfrom) const; friend struct ::avalanche::AvalancheTest; }; } // namespace avalanche #endif // BITCOIN_AVALANCHE_PROCESSOR_H diff --git a/src/net.h b/src/net.h index fdad53de1..d442a013f 100644 --- a/src/net.h +++ b/src/net.h @@ -1,1332 +1,1326 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Copyright (c) 2017-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_NET_H #define BITCOIN_NET_H #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include // For cs_main #include #include #include #include #include #include #include #ifndef WIN32 #include #endif class BanMan; class Config; class CNode; class CScheduler; struct bilingual_str; /** Default for -whitelistrelay. */ static const bool DEFAULT_WHITELISTRELAY = true; /** Default for -whitelistforcerelay. */ static const bool DEFAULT_WHITELISTFORCERELAY = false; /** * Time after which to disconnect, after waiting for a ping response (or * inactivity). */ static const int TIMEOUT_INTERVAL = 20 * 60; /** Run the feeler connection loop once every 2 minutes or 120 seconds. **/ static const int FEELER_INTERVAL = 120; /** The maximum number of new addresses to accumulate before announcing. 
*/ static const unsigned int MAX_ADDR_TO_SEND = 1000; /** Maximum length of the user agent string in `version` message */ static const unsigned int MAX_SUBVERSION_LENGTH = 256; /** * Maximum number of automatic outgoing nodes over which we'll relay everything * (blocks, tx, addrs, etc) */ static const int MAX_OUTBOUND_FULL_RELAY_CONNECTIONS = 8; /** Maximum number of addnode outgoing nodes */ static const int MAX_ADDNODE_CONNECTIONS = 8; /** Maximum number of block-relay-only outgoing connections */ static const int MAX_BLOCKS_ONLY_CONNECTIONS = 2; /** Maximum number of feeler connections */ static const int MAX_FEELER_CONNECTIONS = 1; /** -listen default */ static const bool DEFAULT_LISTEN = true; /** -upnp default */ #ifdef USE_UPNP static const bool DEFAULT_UPNP = USE_UPNP; #else static const bool DEFAULT_UPNP = false; #endif /** * The maximum number of peer connections to maintain. * This quantity might not be reachable on some systems, especially on platforms * that do not provide a working poll() interface. */ static const unsigned int DEFAULT_MAX_PEER_CONNECTIONS = 4096; /** The default for -maxuploadtarget. 0 = Unlimited */ static const uint64_t DEFAULT_MAX_UPLOAD_TARGET = 0; /** The default timeframe for -maxuploadtarget. 1 day. */ static const uint64_t MAX_UPLOAD_TIMEFRAME = 60 * 60 * 24; /** Default for blocks only*/ static const bool DEFAULT_BLOCKSONLY = false; /** -peertimeout default */ static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60; static const bool DEFAULT_FORCEDNSSEED = false; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; /** Refresh period for the avalanche statistics computation */ static constexpr std::chrono::minutes AVALANCHE_STATISTICS_REFRESH_PERIOD{10}; /** Time constant for the avalanche statistics computation */ static constexpr std::chrono::minutes AVALANCHE_STATISTICS_TIME_CONSTANT{10}; /** * Pre-computed decay factor for the avalanche statistics computation. * There is currently no constexpr variant of std::exp, so use a const. */ static const double AVALANCHE_STATISTICS_DECAY_FACTOR = 1. - std::exp(-1. * AVALANCHE_STATISTICS_REFRESH_PERIOD.count() / AVALANCHE_STATISTICS_TIME_CONSTANT.count()); -typedef int64_t NodeId; - -/** - * Special NodeId that represent no node. - */ -static constexpr NodeId NO_NODE = -1; - struct AddedNodeInfo { std::string strAddedNode; CService resolvedAddress; bool fConnected; bool fInbound; }; struct CNodeStats; class CClientUIInterface; struct CSerializedNetMsg { CSerializedNetMsg() = default; CSerializedNetMsg(CSerializedNetMsg &&) = default; CSerializedNetMsg &operator=(CSerializedNetMsg &&) = default; // No copying, only moves. CSerializedNetMsg(const CSerializedNetMsg &msg) = delete; CSerializedNetMsg &operator=(const CSerializedNetMsg &) = delete; std::vector data; std::string m_type; }; /** * Different types of connections to a peer. This enum encapsulates the * information we have available at the time of opening or accepting the * connection. Aside from INBOUND, all types are initiated by us. */ enum class ConnectionType { /** * Inbound connections are those initiated by a peer. This is the only * property we know at the time of connection, until P2P messages are * exchanged. */ INBOUND, /** * These are the default connections that we use to connect with the * network. There is no restriction on what is relayed- by default we relay * blocks, addresses & transactions. 
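// [Editor's note] Worked example for AVALANCHE_STATISTICS_DECAY_FACTOR
// defined above. With a 10 minute refresh period and a 10 minute time
// constant the exponent is -1, so the factor is 1 - e^(-1) ~= 0.632: each
// refresh keeps roughly 63% of the new observation and 37% of the previous
// average. The helper below is only a restatement of that formula.
#include <cassert>
#include <cmath>

namespace sketch {
inline double decayFactor(double refreshMinutes, double timeConstantMinutes) {
    return 1. - std::exp(-refreshMinutes / timeConstantMinutes);
}

inline void decayFactorExample() {
    const double d = decayFactor(10., 10.);
    assert(d > 0.63 && d < 0.64); // 1 - exp(-1) == 0.63212...
}
} // namespace sketch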
We automatically attempt to open * MAX_OUTBOUND_FULL_RELAY_CONNECTIONS using addresses from our AddrMan. */ OUTBOUND_FULL_RELAY, /** * We open manual connections to addresses that users explicitly inputted * via the addnode RPC, or the -connect command line argument. Even if a * manual connection is misbehaving, we do not automatically disconnect or * add it to our discouragement filter. */ MANUAL, /** * Feeler connections are short lived connections used to increase the * number of connectable addresses in our AddrMan. Approximately every * FEELER_INTERVAL, we attempt to connect to a random address from the new * table. If successful, we add it to the tried table. */ FEELER, /** * We use block-relay-only connections to help prevent against partition * attacks. By not relaying transactions or addresses, these connections * are harder to detect by a third party, thus helping obfuscate the * network topology. We automatically attempt to open * MAX_BLOCK_RELAY_ONLY_CONNECTIONS using addresses from our AddrMan. */ BLOCK_RELAY, /** * AddrFetch connections are short lived connections used to solicit * addresses from peers. These are initiated to addresses submitted via the * -seednode command line argument, or under certain conditions when the * AddrMan is empty. */ ADDR_FETCH, }; namespace { struct CConnmanTest; } class NetEventsInterface; class CConnman { public: enum NumConnections { CONNECTIONS_NONE = 0, CONNECTIONS_IN = (1U << 0), CONNECTIONS_OUT = (1U << 1), CONNECTIONS_ALL = (CONNECTIONS_IN | CONNECTIONS_OUT), }; struct Options { ServiceFlags nLocalServices = NODE_NONE; int nMaxConnections = 0; int m_max_outbound_full_relay = 0; int m_max_outbound_block_relay = 0; int nMaxAddnode = 0; int nMaxFeeler = 0; int nBestHeight = 0; CClientUIInterface *uiInterface = nullptr; NetEventsInterface *m_msgproc = nullptr; BanMan *m_banman = nullptr; unsigned int nSendBufferMaxSize = 0; unsigned int nReceiveFloodSize = 0; uint64_t nMaxOutboundTimeframe = 0; uint64_t nMaxOutboundLimit = 0; int64_t m_peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT; std::vector vSeedNodes; std::vector vWhitelistedRange; std::vector vWhiteBinds; std::vector vBinds; bool m_use_addrman_outgoing = true; std::vector m_specified_outgoing; std::vector m_added_nodes; std::vector m_asmap; }; void Init(const Options &connOptions) { nLocalServices = connOptions.nLocalServices; nMaxConnections = connOptions.nMaxConnections; m_use_addrman_outgoing = connOptions.m_use_addrman_outgoing; nMaxAddnode = connOptions.nMaxAddnode; nMaxFeeler = connOptions.nMaxFeeler; { // Lock cs_main to prevent a potential race with the peer validation // logic thread. 
LOCK(::cs_main); m_max_outbound_full_relay = std::min(connOptions.m_max_outbound_full_relay, connOptions.nMaxConnections); m_max_outbound_block_relay = connOptions.m_max_outbound_block_relay; m_max_outbound = m_max_outbound_full_relay + m_max_outbound_block_relay + nMaxFeeler; } nBestHeight = connOptions.nBestHeight; clientInterface = connOptions.uiInterface; m_banman = connOptions.m_banman; m_msgproc = connOptions.m_msgproc; nSendBufferMaxSize = connOptions.nSendBufferMaxSize; nReceiveFloodSize = connOptions.nReceiveFloodSize; m_peer_connect_timeout = connOptions.m_peer_connect_timeout; { LOCK(cs_totalBytesSent); nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe; nMaxOutboundLimit = connOptions.nMaxOutboundLimit; } vWhitelistedRange = connOptions.vWhitelistedRange; { LOCK(cs_vAddedNodes); vAddedNodes = connOptions.m_added_nodes; } } CConnman(const Config &configIn, uint64_t seed0, uint64_t seed1, bool network_active = true); ~CConnman(); bool Start(CScheduler &scheduler, const Options &options); void StopThreads(); void StopNodes(); void Stop() { StopThreads(); StopNodes(); }; void Interrupt(); bool GetNetworkActive() const { return fNetworkActive; }; bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; }; void SetNetworkActive(bool active); void OpenNetworkConnection(const CAddress &addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *strDest, ConnectionType conn_type); bool CheckIncomingNonce(uint64_t nonce); bool ForNode(NodeId id, std::function func); void PushMessage(CNode *pnode, CSerializedNetMsg &&msg); template void ForEachNode(Callable &&func) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNode(Callable &&func) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNodeThen(Callable &&pre, CallableAfter &&post) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; template void ForEachNodeThen(Callable &&pre, CallableAfter &&post) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; // Addrman functions void SetServices(const CService &addr, ServiceFlags nServices); void MarkAddressGood(const CAddress &addr); void AddNewAddresses(const std::vector &vAddr, const CAddress &addrFrom, int64_t nTimePenalty = 0); std::vector GetAddresses(); // This allows temporarily exceeding m_max_outbound_full_relay, with the // goal of finding a peer that is better than all our current peers. void SetTryNewOutboundPeer(bool flag); bool GetTryNewOutboundPeer(); // Return the number of outbound peers we have in excess of our target (eg, // if we previously called SetTryNewOutboundPeer(true), and have since set // to false, we may have extra peers that we wish to disconnect). This may // return a value less than (num_outbound_connections - num_outbound_slots) // in cases where some outbound connections are not yet fully connected, or // not yet fully disconnected. int GetExtraOutboundCount(); bool AddNode(const std::string &node); bool RemoveAddedNode(const std::string &node); std::vector GetAddedNodeInfo(); size_t GetNodeCount(NumConnections num); void GetNodeStats(std::vector &vstats); bool DisconnectNode(const std::string &node); bool DisconnectNode(const CSubNet &subnet); bool DisconnectNode(const CNetAddr &addr); bool DisconnectNode(NodeId id); //! 
Used to convey which local services we are offering peers during node //! connection. //! //! The data returned by this is used in CNode construction, //! which is used to advertise which services we are offering //! that peer during `net_processing.cpp:PushNodeVersion()`. ServiceFlags GetLocalServices() const; //! set the max outbound target in bytes. void SetMaxOutboundTarget(uint64_t limit); uint64_t GetMaxOutboundTarget(); //! set the timeframe for the max outbound target. void SetMaxOutboundTimeframe(uint64_t timeframe); uint64_t GetMaxOutboundTimeframe(); //! check if the outbound target is reached. If param //! historicalBlockServingLimit is set true, the function will response true //! if the limit for serving historical blocks has been reached. bool OutboundTargetReached(bool historicalBlockServingLimit); //! response the bytes left in the current max outbound cycle in case of no //! limit, it will always response 0 uint64_t GetOutboundTargetBytesLeft(); //! response the time in second left in the current max outbound cycle in //! case of no limit, it will always response 0 uint64_t GetMaxOutboundTimeLeftInCycle(); uint64_t GetTotalBytesRecv(); uint64_t GetTotalBytesSent(); void SetBestHeight(int height); int GetBestHeight() const; /** Get a unique deterministic randomizer. */ CSipHasher GetDeterministicRandomizer(uint64_t id) const; unsigned int GetReceiveFloodSize() const; void WakeMessageHandler(); /** * Attempts to obfuscate tx time through exponentially distributed emitting. * Works assuming that a single interval is used. * Variable intervals will result in privacy decrease. */ int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds); void SetAsmap(std::vector asmap) { addrman.m_asmap = std::move(asmap); } private: struct ListenSocket { public: SOCKET socket; inline void AddSocketPermissionFlags(NetPermissionFlags &flags) const { NetPermissions::AddFlag(flags, m_permissions); } ListenSocket(SOCKET socket_, NetPermissionFlags permissions_) : socket(socket_), m_permissions(permissions_) {} private: NetPermissionFlags m_permissions; }; bool BindListenPort(const CService &bindAddr, bilingual_str &strError, NetPermissionFlags permissions); bool Bind(const CService &addr, unsigned int flags, NetPermissionFlags permissions); bool InitBinds(const std::vector &binds, const std::vector &whiteBinds); void ThreadOpenAddedConnections(); void AddAddrFetch(const std::string &strDest); void ProcessAddrFetch(); void ThreadOpenConnections(std::vector connect); void ThreadMessageHandler(); void AcceptConnection(const ListenSocket &hListenSocket); void DisconnectNodes(); void NotifyNumConnectionsChanged(); void InactivityCheck(CNode *pnode); bool GenerateSelectSet(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketEvents(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketHandler(); void ThreadSocketHandler(); void ThreadDNSAddressSeed(); uint64_t CalculateKeyedNetGroup(const CAddress &ad) const; CNode *FindNode(const CNetAddr &ip); CNode *FindNode(const CSubNet &subNet); CNode *FindNode(const std::string &addrName); CNode *FindNode(const CService &addr); bool AttemptToEvictConnection(); CNode *ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type); void AddWhitelistPermissionFlags(NetPermissionFlags &flags, const CNetAddr &addr) const; void DeleteNode(CNode *pnode); NodeId GetNewNodeId(); size_t SocketSendData(CNode *pnode) const; void DumpAddresses(); // Network stats void 
RecordBytesRecv(uint64_t bytes); void RecordBytesSent(uint64_t bytes); // Whether the node should be passed out in ForEach* callbacks static bool NodeFullyConnected(const CNode *pnode); const Config *config; // Network usage totals RecursiveMutex cs_totalBytesRecv; RecursiveMutex cs_totalBytesSent; uint64_t nTotalBytesRecv GUARDED_BY(cs_totalBytesRecv){0}; uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytesSent){0}; // outbound limit & stats uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundCycleStartTime GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundLimit GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundTimeframe GUARDED_BY(cs_totalBytesSent); // P2P timeout in seconds int64_t m_peer_connect_timeout; // Whitelisted ranges. Any node connecting from these is automatically // whitelisted (as well as those connecting to whitelisted binds). std::vector vWhitelistedRange; unsigned int nSendBufferMaxSize{0}; unsigned int nReceiveFloodSize{0}; std::vector vhListenSocket; std::atomic fNetworkActive{true}; bool fAddressesInitialized{false}; CAddrMan addrman; std::deque m_addr_fetches GUARDED_BY(m_addr_fetches_mutex); RecursiveMutex m_addr_fetches_mutex; std::vector vAddedNodes GUARDED_BY(cs_vAddedNodes); RecursiveMutex cs_vAddedNodes; std::vector vNodes GUARDED_BY(cs_vNodes); std::list vNodesDisconnected; mutable RecursiveMutex cs_vNodes; std::atomic nLastNodeId{0}; unsigned int nPrevNodeCount{0}; /** * Services this instance offers. * * This data is replicated in each CNode instance we create during peer * connection (in ConnectNode()) under a member also called * nLocalServices. * * This data is not marked const, but after being set it should not * change. See the note in CNode::nLocalServices documentation. * * \sa CNode::nLocalServices */ ServiceFlags nLocalServices; std::unique_ptr semOutbound; std::unique_ptr semAddnode; int nMaxConnections; // How many full-relay (tx, block, addr) outbound peers we want int m_max_outbound_full_relay; // How many block-relay only outbound peers we want // We do not relay tx or addr messages with these peers int m_max_outbound_block_relay; int nMaxAddnode; int nMaxFeeler; int m_max_outbound; bool m_use_addrman_outgoing; std::atomic nBestHeight; CClientUIInterface *clientInterface; NetEventsInterface *m_msgproc; /** * Pointer to this node's banman. May be nullptr - check existence before * dereferencing. */ BanMan *m_banman; /** SipHasher seeds for deterministic randomness */ const uint64_t nSeed0, nSeed1; /** flag for waking the message processor. */ bool fMsgProcWake GUARDED_BY(mutexMsgProc); std::condition_variable condMsgProc; Mutex mutexMsgProc; std::atomic flagInterruptMsgProc{false}; CThreadInterrupt interruptNet; std::thread threadDNSAddressSeed; std::thread threadSocketHandler; std::thread threadOpenAddedConnections; std::thread threadOpenConnections; std::thread threadMessageHandler; /** * flag for deciding to connect to an extra outbound peer, in excess of * m_max_outbound_full_relay. This takes the place of a feeler connection. 
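// [Editor's note] Usage sketch for the callable-based iteration helpers
// CConnman declares above (ForEachNode / ForNode). The pattern is simply to
// pass a callable that receives each fully connected node. `countInboundPeers`
// and the Connman template parameter are hypothetical stand-ins, not part of
// this header.
namespace sketch {
template <typename Connman> int countInboundPeers(Connman &connman) {
    int inbound = 0;
    connman.ForEachNode([&inbound](const auto *pnode) {
        // IsInboundConn() is the accessor CNode exposes further down in this
        // header.
        if (pnode->IsInboundConn()) {
            ++inbound;
        }
    });
    return inbound;
}
} // namespace sketch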
*/ std::atomic_bool m_try_another_outbound_peer; std::atomic m_next_send_inv_to_incoming{0}; friend struct ::CConnmanTest; friend struct ConnmanTestMsg; }; void Discover(); void StartMapPort(); void InterruptMapPort(); void StopMapPort(); unsigned short GetListenPort(); /** * Interface for message handling */ class NetEventsInterface { public: virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual bool SendMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual void InitializeNode(const Config &config, CNode *pnode) = 0; virtual void FinalizeNode(const Config &config, NodeId id, bool &update_connection_time) = 0; protected: /** * Protected destructor so that instances can only be deleted by derived * classes. If that restriction is no longer desired, this should be made * public and virtual. */ ~NetEventsInterface() = default; }; enum { // unknown LOCAL_NONE, // address a local interface listens on LOCAL_IF, // address explicit bound to LOCAL_BIND, // address reported by UPnP LOCAL_UPNP, // address explicitly specified (-externalip=) LOCAL_MANUAL, LOCAL_MAX }; bool IsPeerAddrLocalGood(CNode *pnode); void AdvertiseLocal(CNode *pnode); /** * Mark a network as reachable or unreachable (no automatic connects to it) * @note Networks are reachable by default */ void SetReachable(enum Network net, bool reachable); /** @returns true if the network is reachable, false otherwise */ bool IsReachable(enum Network net); /** @returns true if the address is in a reachable network, false otherwise */ bool IsReachable(const CNetAddr &addr); bool AddLocal(const CService &addr, int nScore = LOCAL_NONE); bool AddLocal(const CNetAddr &addr, int nScore = LOCAL_NONE); void RemoveLocal(const CService &addr); bool SeenLocal(const CService &addr); bool IsLocal(const CService &addr); bool GetLocal(CService &addr, const CNetAddr *paddrPeer = nullptr); CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices); extern bool fDiscover; extern bool fListen; extern bool g_relay_txes; struct LocalServiceInfo { int nScore; int nPort; }; extern RecursiveMutex cs_mapLocalHost; extern std::map mapLocalHost GUARDED_BY(cs_mapLocalHost); extern const std::string NET_MESSAGE_COMMAND_OTHER; // Command, total bytes typedef std::map mapMsgCmdSize; /** * POD that contains various stats about a node. * Usually constructed from CConman::GetNodeStats. Stats are filled from the * node using CNode::copyStats. */ struct CNodeStats { NodeId nodeid; ServiceFlags nServices; bool fRelayTxes; int64_t nLastSend; int64_t nLastRecv; int64_t nLastTXTime; int64_t nLastProofTime; int64_t nLastBlockTime; int64_t nTimeConnected; int64_t nTimeOffset; std::string addrName; int nVersion; std::string cleanSubVer; bool fInbound; bool m_manual_connection; int nStartingHeight; uint64_t nSendBytes; mapMsgCmdSize mapSendBytesPerMsgCmd; uint64_t nRecvBytes; mapMsgCmdSize mapRecvBytesPerMsgCmd; NetPermissionFlags m_permissionFlags; bool m_legacyWhitelisted; int64_t m_ping_usec; int64_t m_ping_wait_usec; int64_t m_min_ping_usec; Amount minFeeFilter; // Our address, as reported by the peer std::string addrLocal; // Address of this peer CAddress addr; // Bind address of our side of the connection CAddress addrBind; uint32_t m_mapped_as; }; /** * Transport protocol agnostic message container. * Ideally it should only contain receive time, payload, * command and size. */ class CNetMessage { public: //! received message data CDataStream m_recv; //! 
time of message receipt std::chrono::microseconds m_time{0}; bool m_valid_netmagic = false; bool m_valid_header = false; bool m_valid_checksum = false; //! size of the payload uint32_t m_message_size{0}; //! used wire size of the message (including header/checksum) uint32_t m_raw_message_size{0}; std::string m_command; CNetMessage(CDataStream &&recv_in) : m_recv(std::move(recv_in)) {} void SetVersion(int nVersionIn) { m_recv.SetVersion(nVersionIn); } }; /** * The TransportDeserializer takes care of holding and deserializing the * network receive buffer. It can deserialize the network buffer into a * transport protocol agnostic CNetMessage (command & payload) */ class TransportDeserializer { public: // returns true if the current deserialization is complete virtual bool Complete() const = 0; // set the serialization context version virtual void SetVersion(int version) = 0; // read and deserialize data virtual int Read(const Config &config, const char *data, uint32_t bytes) = 0; // decomposes a message from the context virtual CNetMessage GetMessage(const Config &config, std::chrono::microseconds time) = 0; virtual ~TransportDeserializer() {} }; class V1TransportDeserializer final : public TransportDeserializer { private: mutable CHash256 hasher; mutable uint256 data_hash; // Parsing header (false) or data (true) bool in_data; // Partially received header. CDataStream hdrbuf; // Complete header. CMessageHeader hdr; // Received message data. CDataStream vRecv; uint32_t nHdrPos; uint32_t nDataPos; const uint256 &GetMessageHash() const; int readHeader(const Config &config, const char *pch, uint32_t nBytes); int readData(const char *pch, uint32_t nBytes); void Reset() { vRecv.clear(); hdrbuf.clear(); hdrbuf.resize(24); in_data = false; nHdrPos = 0; nDataPos = 0; data_hash.SetNull(); hasher.Reset(); } public: V1TransportDeserializer( const CMessageHeader::MessageMagic &pchMessageStartIn, int nTypeIn, int nVersionIn) : hdrbuf(nTypeIn, nVersionIn), hdr(pchMessageStartIn), vRecv(nTypeIn, nVersionIn) { Reset(); } bool Complete() const override { if (!in_data) { return false; } return (hdr.nMessageSize == nDataPos); } void SetVersion(int nVersionIn) override { hdrbuf.SetVersion(nVersionIn); vRecv.SetVersion(nVersionIn); } int Read(const Config &config, const char *pch, uint32_t nBytes) override { int ret = in_data ? readData(pch, nBytes) : readHeader(config, pch, nBytes); if (ret < 0) { Reset(); } return ret; } CNetMessage GetMessage(const Config &config, std::chrono::microseconds time) override; }; /** * The TransportSerializer prepares messages for the network transport */ class TransportSerializer { public: // prepare message for transport (header construction, error-correction // computation, payload encryption, etc.) virtual void prepareForTransport(const Config &config, CSerializedNetMsg &msg, std::vector &header) = 0; virtual ~TransportSerializer() {} }; class V1TransportSerializer : public TransportSerializer { public: void prepareForTransport(const Config &config, CSerializedNetMsg &msg, std::vector &header) override; }; /** Information about a peer */ class CNode { friend class CConnman; friend struct ConnmanTestMsg; public: std::unique_ptr m_deserializer; std::unique_ptr m_serializer; // socket std::atomic nServices{NODE_NONE}; SOCKET hSocket GUARDED_BY(cs_hSocket); // Total size of all vSendMsg entries. size_t nSendSize{0}; // Offset inside the first vSendMsg already sent. 
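// [Editor's note] Sketch of how the TransportDeserializer interface defined
// above is meant to be driven: feed raw bytes with Read() until Complete(),
// then extract a CNetMessage with GetMessage(). This is a simplified, generic
// stand-in for the kind of loop CNode::ReceiveMsgBytes() runs, with the
// receive time hard-coded to zero for brevity.
#include <chrono>
#include <cstdint>
#include <list>

namespace sketch {
template <typename Deserializer, typename Config, typename Message>
bool feedBytes(Deserializer &deserializer, const Config &config,
               const char *data, uint32_t nBytes, std::list<Message> &out) {
    while (nBytes > 0) {
        // Read() consumes up to nBytes, returning how much it used, or a
        // negative value on a malformed header/payload.
        const int handled = deserializer.Read(config, data, nBytes);
        if (handled < 0) {
            return false; // corrupted stream, caller should disconnect
        }
        data += handled;
        nBytes -= handled;
        if (deserializer.Complete()) {
            out.push_back(deserializer.GetMessage(
                config, std::chrono::microseconds{0} /* receive time */));
        }
    }
    return true;
}
} // namespace sketch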
size_t nSendOffset{0}; uint64_t nSendBytes GUARDED_BY(cs_vSend){0}; std::deque> vSendMsg GUARDED_BY(cs_vSend); Mutex cs_vSend; Mutex cs_hSocket; Mutex cs_vRecv; RecursiveMutex cs_vProcessMsg; std::list vProcessMsg GUARDED_BY(cs_vProcessMsg); size_t nProcessQueueSize{0}; RecursiveMutex cs_sendProcessing; std::deque vRecvGetData; uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0}; std::atomic nLastSend{0}; std::atomic nLastRecv{0}; const int64_t nTimeConnected; std::atomic nTimeOffset{0}; // Address of this peer const CAddress addr; // Bind address of our side of the connection const CAddress addrBind; std::atomic nVersion{0}; // The nonce provided by the remote host. uint64_t nRemoteHostNonce{0}; // The extra entropy provided by the remote host. uint64_t nRemoteExtraEntropy{0}; /** * cleanSubVer is a sanitized string of the user agent byte array we read * from the wire. This cleaned string can safely be logged or displayed. */ RecursiveMutex cs_SubVer; std::string cleanSubVer GUARDED_BY(cs_SubVer){}; // This peer is preferred for eviction. bool m_prefer_evict{false}; bool HasPermission(NetPermissionFlags permission) const { return NetPermissions::HasFlag(m_permissionFlags, permission); } // This boolean is unusued in actual processing, only present for backward // compatibility at RPC/QT level bool m_legacyWhitelisted{false}; // set by version message bool fClient{false}; // after BIP159, set by version message bool m_limited_node{false}; /** * Whether the peer has signaled support for receiving ADDRv2 (BIP155) * messages, implying a preference to receive ADDRv2 instead of ADDR ones. */ std::atomic_bool m_wants_addrv2{false}; std::atomic_bool fSuccessfullyConnected{false}; // Setting fDisconnect to true will cause the node to be disconnected the // next time DisconnectNodes() runs std::atomic_bool fDisconnect{false}; bool fSentAddr{false}; CSemaphoreGrant grantOutbound; std::atomic nRefCount{0}; const uint64_t nKeyedNetGroup; std::atomic_bool fPauseRecv{false}; std::atomic_bool fPauseSend{false}; bool IsOutboundOrBlockRelayConn() const { switch (m_conn_type) { case ConnectionType::OUTBOUND_FULL_RELAY: case ConnectionType::BLOCK_RELAY: return true; case ConnectionType::INBOUND: case ConnectionType::MANUAL: case ConnectionType::ADDR_FETCH: case ConnectionType::FEELER: return false; } // no default case, so the compiler can warn about missing cases assert(false); } bool IsFullOutboundConn() const { return m_conn_type == ConnectionType::OUTBOUND_FULL_RELAY; } bool IsManualConn() const { return m_conn_type == ConnectionType::MANUAL; } bool IsBlockOnlyConn() const { return m_conn_type == ConnectionType::BLOCK_RELAY; } bool IsFeelerConn() const { return m_conn_type == ConnectionType::FEELER; } bool IsAddrFetchConn() const { return m_conn_type == ConnectionType::ADDR_FETCH; } bool IsInboundConn() const { return m_conn_type == ConnectionType::INBOUND; } /* Whether we send addr messages over this connection */ bool RelayAddrsWithConn() const { return m_conn_type != ConnectionType::BLOCK_RELAY; } bool ExpectServicesFromConn() const { switch (m_conn_type) { case ConnectionType::INBOUND: case ConnectionType::MANUAL: case ConnectionType::FEELER: return false; case ConnectionType::OUTBOUND_FULL_RELAY: case ConnectionType::BLOCK_RELAY: case ConnectionType::ADDR_FETCH: return true; } // no default case, so the compiler can warn about missing cases assert(false); } protected: mapMsgCmdSize mapSendBytesPerMsgCmd; mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); public: BlockHash hashContinue; std::atomic 
nStartingHeight{-1}; // flood relay std::vector vAddrToSend; std::unique_ptr m_addr_known = nullptr; bool fGetAddr{false}; std::chrono::microseconds m_next_addr_send GUARDED_BY(cs_sendProcessing){0}; std::chrono::microseconds m_next_local_addr_send GUARDED_BY(cs_sendProcessing){0}; // List of block ids we still have to announce. // There is no final sorting before sending, as they are always sent // immediately and in the order requested. std::vector vInventoryBlockToSend GUARDED_BY(cs_inventory); Mutex cs_inventory; struct TxRelay { mutable RecursiveMutex cs_filter; // We use fRelayTxes for two purposes - // a) it allows us to not relay tx invs before receiving the peer's // version message. // b) the peer may tell us in its version message that we should not // relay tx invs unless it loads a bloom filter. bool fRelayTxes GUARDED_BY(cs_filter){false}; std::unique_ptr pfilter PT_GUARDED_BY(cs_filter) GUARDED_BY(cs_filter){nullptr}; mutable RecursiveMutex cs_tx_inventory; CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_tx_inventory){ 50000, 0.000001}; // Set of transaction ids we still have to announce. // They are sorted by the mempool before relay, so the order is not // important. std::set setInventoryTxToSend GUARDED_BY(cs_tx_inventory); // Used for BIP35 mempool sending bool fSendMempool GUARDED_BY(cs_tx_inventory){false}; // Last time a "MEMPOOL" request was serviced. std::atomic m_last_mempool_req{ std::chrono::seconds{0}}; std::chrono::microseconds nNextInvSend{0}; RecursiveMutex cs_feeFilter; // Minimum fee rate with which to filter inv's to this node Amount minFeeFilter GUARDED_BY(cs_feeFilter){Amount::zero()}; Amount lastSentFeeFilter{Amount::zero()}; int64_t nextSendTimeFeeFilter{0}; }; // m_tx_relay == nullptr if we're not relaying transactions with this peer std::unique_ptr m_tx_relay; struct ProofRelay { mutable RecursiveMutex cs_proof_inventory; std::set setInventoryProofToSend GUARDED_BY(cs_proof_inventory); // Prevent sending proof invs if the peer already knows about them CRollingBloomFilter filterProofKnown GUARDED_BY(cs_proof_inventory){ 10000, 0.000001}; std::chrono::microseconds nextInvSend{0}; }; // m_proof_relay == nullptr if we're not relaying proofs with this peer std::unique_ptr m_proof_relay; class AvalancheState { /** * The inventories polled and voted couters since last score * computation, stored as a pair of uint32_t with the poll counter * being the 32 lowest bits and the vote counter the 32 highest bits. */ std::atomic invCounters; /** The last computed score */ std::atomic availabilityScore; /** * Protect the sequence of operations required for updating the * statistics. */ Mutex cs_statistics; public: CPubKey pubkey; AvalancheState() : invCounters(0), availabilityScore(0.) {} /** The node was polled for count invs */ void invsPolled(uint32_t count); /** The node voted for count invs */ void invsVoted(uint32_t count); /** * The availability score is calculated using an exponentially weighted * average. * This has several interesting properties: * - The most recent polls/responses have more weight than the previous * ones. A node that recently stopped answering will see its ratio * decrease quickly. * - This is a low-pass filter, so it causes delay. This means that a * node needs to have a track record for the ratio to be high. A node * that has been little requested will have a lower ratio than a node * that failed to answer a few polls but answered a lot of them. * - It is cheap to compute. 
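// [Editor's note] Sketch of the exponentially weighted average described in
// the AvalancheState comment around this point. The real update in net.cpp
// also unpacks the poll/vote counters from the single 64-bit invCounters word
// (polls in the low 32 bits, votes in the high 32 bits); here the counters and
// the decay factor are plain parameters to keep the example self-contained.
#include <algorithm>
#include <cstdint>

namespace sketch {
// Intended to be called once per AVALANCHE_STATISTICS_REFRESH_PERIOD.
inline double updateAvailabilityScore(double previousScore, uint32_t polled,
                                      uint32_t voted, double decayFactor) {
    // Fraction of polls this peer actually answered during the period.
    const double ratio =
        polled == 0 ? 0. : std::min(double(voted) / double(polled), 1.);
    // Low-pass filter: recent periods dominate, but a single good period is
    // not enough to reach a high score.
    return decayFactor * ratio + (1. - decayFactor) * previousScore;
}
} // namespace sketch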
* * This is expected to be called at a fixed interval of * AVALANCHE_STATISTICS_REFRESH_PERIOD. */ void updateAvailabilityScore(); double getAvailabilityScore() const; }; // m_avalanche_state == nullptr if we're not using avalanche with this peer std::unique_ptr m_avalanche_state; // Used for headers announcements - unfiltered blocks to relay std::vector vBlockHashesToAnnounce GUARDED_BY(cs_inventory); /** * UNIX epoch time of the last block received from this peer that we had * not yet seen (e.g. not already received from another peer), that passed * preliminary validity checks and was saved to disk, even if we don't * connect the block or it eventually fails connection. Used as an inbound * peer eviction criterium in CConnman::AttemptToEvictConnection. */ std::atomic nLastBlockTime{0}; /** * UNIX epoch time of the last transaction received from this peer that we * had not yet seen (e.g. not already received from another peer) and that * was accepted into our mempool. Used as an inbound peer eviction criterium * in CConnman::AttemptToEvictConnection. */ std::atomic nLastTXTime{0}; /** * UNIX epoch time of the last proof received from this peer that we * had not yet seen (e.g. not already received from another peer) and that * was accepted into our proof pool. Used as an inbound peer eviction * criterium in CConnman::AttemptToEvictConnection. */ std::atomic nLastProofTime{0}; // Ping time measurement: // The pong reply we're expecting, or 0 if no pong expected. std::atomic nPingNonceSent{0}; /** When the last ping was sent, or 0 if no ping was ever sent */ std::atomic m_ping_start{ std::chrono::microseconds{0}}; // Last measured round-trip time. std::atomic nPingUsecTime{0}; // Best measured round-trip time. std::atomic nMinPingUsecTime{std::numeric_limits::max()}; // Whether a ping is requested. std::atomic fPingQueued{false}; std::set orphan_work_set; CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, uint64_t nLocalExtraEntropyIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in); ~CNode(); CNode(const CNode &) = delete; CNode &operator=(const CNode &) = delete; private: const NodeId id; const uint64_t nLocalHostNonce; const uint64_t nLocalExtraEntropy; const ConnectionType m_conn_type; std::atomic m_greatest_common_version{INIT_PROTO_VERSION}; //! Services offered to this peer. //! //! This is supplied by the parent CConnman during peer connection //! (CConnman::ConnectNode()) from its attribute of the same name. //! //! This is const because there is no protocol defined for renegotiating //! services initially offered to a peer. The set of local services we //! offer should not change after initialization. //! //! An interesting example of this is NODE_NETWORK and initial block //! download: a node which starts up from scratch doesn't have any blocks //! to serve, but still advertises NODE_NETWORK because it will eventually //! fulfill this role after IBD completes. P2P code is written in such a //! way that it can gracefully handle peers who don't make good on their //! service advertisements. 
const ServiceFlags nLocalServices; const int nMyStartingHeight; NetPermissionFlags m_permissionFlags{PF_NONE}; // Used only by SocketHandler thread std::list vRecvMsg; mutable RecursiveMutex cs_addrName; std::string addrName GUARDED_BY(cs_addrName); // Our address, as reported by the peer CService addrLocal GUARDED_BY(cs_addrLocal); mutable RecursiveMutex cs_addrLocal; public: NodeId GetId() const { return id; } uint64_t GetLocalNonce() const { return nLocalHostNonce; } uint64_t GetLocalExtraEntropy() const { return nLocalExtraEntropy; } int GetMyStartingHeight() const { return nMyStartingHeight; } int GetRefCount() const { assert(nRefCount >= 0); return nRefCount; } bool ReceiveMsgBytes(const Config &config, const char *pch, uint32_t nBytes, bool &complete); void SetCommonVersion(int greatest_common_version) { Assume(m_greatest_common_version == INIT_PROTO_VERSION); m_greatest_common_version = greatest_common_version; } int GetCommonVersion() const { return m_greatest_common_version; } CService GetAddrLocal() const; //! May not be called more than once void SetAddrLocal(const CService &addrLocalIn); CNode *AddRef() { nRefCount++; return this; } void Release() { nRefCount--; } void AddAddressKnown(const CAddress &_addr) { assert(m_addr_known); m_addr_known->insert(_addr.GetKey()); } void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand) { // Whether the peer supports the address in `_addr`. For example, // nodes that do not implement BIP155 cannot receive Tor v3 addresses // because they require ADDRv2 (BIP155) encoding. const bool addr_format_supported = m_wants_addrv2 || _addr.IsAddrV1Compatible(); // Known checking here is only to save space from duplicates. // SendMessages will filter it again for knowns that were added // after addresses were pushed. assert(m_addr_known); if (_addr.IsValid() && !m_addr_known->contains(_addr.GetKey()) && addr_format_supported) { if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) { vAddrToSend[insecure_rand.randrange(vAddrToSend.size())] = _addr; } else { vAddrToSend.push_back(_addr); } } } void AddKnownTx(const TxId &txid) { if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_tx_inventory); m_tx_relay->filterInventoryKnown.insert(txid); } } void PushTxInventory(const TxId &txid) { if (m_tx_relay == nullptr) { return; } LOCK(m_tx_relay->cs_tx_inventory); if (!m_tx_relay->filterInventoryKnown.contains(txid)) { m_tx_relay->setInventoryTxToSend.insert(txid); } } void AddKnownProof(const avalanche::ProofId &proofid) { if (m_proof_relay != nullptr) { LOCK(m_proof_relay->cs_proof_inventory); m_proof_relay->filterProofKnown.insert(proofid); } } void PushProofInventory(const avalanche::ProofId &proofid) { if (m_proof_relay == nullptr) { return; } LOCK(m_proof_relay->cs_proof_inventory); if (!m_proof_relay->filterProofKnown.contains(proofid)) { m_proof_relay->setInventoryProofToSend.insert(proofid); } } void CloseSocketDisconnect(); void copyStats(CNodeStats &stats, const std::vector &m_asmap); ServiceFlags GetLocalServices() const { return nLocalServices; } std::string GetAddrName() const; //! Sets the addrName only if it was not previously set void MaybeSetAddrName(const std::string &addrNameIn); }; /** * Return a timestamp in the future (in microseconds) for exponentially * distributed events. 
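// [Editor's note] Illustrative sketch only: the PoissonNextSend() helper
// declared just below returns a future timestamp such that successive events
// are exponentially distributed (memoryless), which obscures the exact
// arrival time of a transaction or address. The real implementation draws
// from its own RNG; this stand-in uses std::exponential_distribution to show
// the same idea, and `nextSendMicros` is a hypothetical name.
#include <cstdint>
#include <random>

namespace sketch {
inline int64_t nextSendMicros(int64_t nowMicros, int averageIntervalSeconds,
                              std::mt19937_64 &rng) {
    // Rate lambda = 1 / average_interval, so the mean delay equals the
    // average interval; convert the drawn delay from seconds to microseconds.
    std::exponential_distribution<double> delay(1.0 / averageIntervalSeconds);
    return nowMicros + int64_t(delay(rng) * 1'000'000.0);
}
} // namespace sketch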
 */
int64_t PoissonNextSend(int64_t now, int average_interval_seconds);

/** Wrapper to return mockable type */
inline std::chrono::microseconds
PoissonNextSend(std::chrono::microseconds now,
                std::chrono::seconds average_interval) {
    return std::chrono::microseconds{
        PoissonNextSend(now.count(), average_interval.count())};
}

std::string getSubVersionEB(uint64_t MaxBlockSize);

std::string userAgent(const Config &config);

struct NodeEvictionCandidate {
    NodeId id;
    int64_t nTimeConnected;
    int64_t nMinPingUsecTime;
    int64_t nLastBlockTime;
    int64_t nLastProofTime;
    int64_t nLastTXTime;
    bool fRelevantServices;
    bool fRelayTxes;
    bool fBloomFilter;
    uint64_t nKeyedNetGroup;
    bool prefer_evict;
    bool m_is_local;
};

[[nodiscard]] std::optional<NodeId>
SelectNodeToEvict(std::vector<NodeEvictionCandidate> &&vEvictionCandidates);

#endif // BITCOIN_NET_H

diff --git a/src/nodeid.h b/src/nodeid.h
new file mode 100644
index 000000000..fde832648
--- /dev/null
+++ b/src/nodeid.h
@@ -0,0 +1,17 @@
+// Copyright (c) 2021 The Bitcoin developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODEID_H
+#define BITCOIN_NODEID_H
+
+#include <cstdint>
+
+typedef int64_t NodeId;
+
+/**
+ * Special NodeId that represents no node.
+ */
+static constexpr NodeId NO_NODE = -1;
+
+#endif // BITCOIN_NODEID_H
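// [Editor's note] Minimal usage sketch for the new nodeid.h header above:
// code that only needs the NodeId type (such as avalanche/node.h in this
// diff) can include this lightweight header instead of pulling in all of
// net.h. `isValidNodeId` is a hypothetical helper, not part of the patch.
#include <nodeid.h>

namespace sketch {
inline bool isValidNodeId(NodeId id) {
    return id != NO_NODE;
}
} // namespace sketch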