diff --git a/src/net.h b/src/net.h index cabc721f5..63c852d0e 100644 --- a/src/net.h +++ b/src/net.h @@ -1,1175 +1,1187 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Copyright (c) 2017-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_NET_H #define BITCOIN_NET_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include // For cs_main #include #include #include #include #include #include #ifndef WIN32 #include #endif class BanMan; class Config; class CNode; class CScheduler; struct bilingual_str; /** Default for -whitelistrelay. */ static const bool DEFAULT_WHITELISTRELAY = true; /** Default for -whitelistforcerelay. */ static const bool DEFAULT_WHITELISTFORCERELAY = false; /** * Time after which to disconnect, after waiting for a ping response (or * inactivity). */ static const int TIMEOUT_INTERVAL = 20 * 60; /** Run the feeler connection loop once every 2 minutes or 120 seconds. **/ static const int FEELER_INTERVAL = 120; /** The maximum number of new addresses to accumulate before announcing. */ static const unsigned int MAX_ADDR_TO_SEND = 1000; /** Maximum length of the user agent string in `version` message */ static const unsigned int MAX_SUBVERSION_LENGTH = 256; /** * Maximum number of automatic outgoing nodes over which we'll relay everything * (blocks, tx, addrs, etc) */ static const int MAX_OUTBOUND_FULL_RELAY_CONNECTIONS = 8; /** Maximum number of addnode outgoing nodes */ static const int MAX_ADDNODE_CONNECTIONS = 8; /** Maximum number of block-relay-only outgoing connections */ static const int MAX_BLOCKS_ONLY_CONNECTIONS = 2; /** Maximum number of feeler connections */ static const int MAX_FEELER_CONNECTIONS = 1; /** -listen default */ static const bool DEFAULT_LISTEN = true; /** -upnp default */ #ifdef USE_UPNP static const bool DEFAULT_UPNP = USE_UPNP; #else static const bool DEFAULT_UPNP = false; #endif /** * The maximum number of peer connections to maintain. * This quantity might not be reachable on some systems, especially on platforms * that do not provide a working poll() interface. */ static const unsigned int DEFAULT_MAX_PEER_CONNECTIONS = 4096; /** The default for -maxuploadtarget. 0 = Unlimited */ static const uint64_t DEFAULT_MAX_UPLOAD_TARGET = 0; /** The default timeframe for -maxuploadtarget. 1 day. */ static const uint64_t MAX_UPLOAD_TIMEFRAME = 60 * 60 * 24; /** Default for blocks only*/ static const bool DEFAULT_BLOCKSONLY = false; /** -peertimeout default */ static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60; static const bool DEFAULT_FORCEDNSSEED = false; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; typedef int64_t NodeId; /** * Special NodeId that represent no node. */ static constexpr NodeId NO_NODE = -1; struct AddedNodeInfo { std::string strAddedNode; CService resolvedAddress; bool fConnected; bool fInbound; }; struct CNodeStats; class CClientUIInterface; struct CSerializedNetMsg { CSerializedNetMsg() = default; CSerializedNetMsg(CSerializedNetMsg &&) = default; CSerializedNetMsg &operator=(CSerializedNetMsg &&) = default; // No copying, only moves. 
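// Usage note (illustrative, adapted from call sites later in this diff, e.g.
// the SENDCMPCT push in net_processing.cpp): a CSerializedNetMsg is built once
// with CNetMsgMaker::Make() and handed to CConnman::PushMessage() as an rvalue,
//
//     connman.PushMessage(&node, CNetMsgMaker(node.GetCommonVersion())
//                                    .Make(NetMsgType::SENDCMPCT, true, 1));
//
// so the payload buffer ends up moved into the per-node send queue (vSendMsg)
// rather than copied, which is why copying is deleted just below.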
CSerializedNetMsg(const CSerializedNetMsg &msg) = delete; CSerializedNetMsg &operator=(const CSerializedNetMsg &) = delete; std::vector data; std::string m_type; }; /** * Different types of connections to a peer. This enum encapsulates the * information we have available at the time of opening or accepting the * connection. Aside from INBOUND, all types are initiated by us. */ enum class ConnectionType { /** Peer initiated connections. */ INBOUND, /** * Full relay connections (blocks, addrs, txns) made automatically. * Addresses selected from AddrMan. */ OUTBOUND, /** * Connections to addresses added via addnode or the connect command line * argument. */ MANUAL, /** Short lived connections used to test address validity. */ FEELER, /** * Only relay blocks to these automatic outbound connections. * Addresses selected from AddrMan. */ BLOCK_RELAY, /** * Short lived connections used to solicit addrs when starting the node * without a populated AddrMan. */ ADDR_FETCH, }; namespace { struct CConnmanTest; } class NetEventsInterface; class CConnman { public: enum NumConnections { CONNECTIONS_NONE = 0, CONNECTIONS_IN = (1U << 0), CONNECTIONS_OUT = (1U << 1), CONNECTIONS_ALL = (CONNECTIONS_IN | CONNECTIONS_OUT), }; struct Options { ServiceFlags nLocalServices = NODE_NONE; int nMaxConnections = 0; int m_max_outbound_full_relay = 0; int m_max_outbound_block_relay = 0; int nMaxAddnode = 0; int nMaxFeeler = 0; int nBestHeight = 0; CClientUIInterface *uiInterface = nullptr; NetEventsInterface *m_msgproc = nullptr; BanMan *m_banman = nullptr; unsigned int nSendBufferMaxSize = 0; unsigned int nReceiveFloodSize = 0; uint64_t nMaxOutboundTimeframe = 0; uint64_t nMaxOutboundLimit = 0; int64_t m_peer_connect_timeout = DEFAULT_PEER_CONNECT_TIMEOUT; std::vector vSeedNodes; std::vector vWhitelistedRange; std::vector vWhiteBinds; std::vector vBinds; bool m_use_addrman_outgoing = true; std::vector m_specified_outgoing; std::vector m_added_nodes; std::vector m_asmap; }; void Init(const Options &connOptions) { nLocalServices = connOptions.nLocalServices; nMaxConnections = connOptions.nMaxConnections; m_use_addrman_outgoing = connOptions.m_use_addrman_outgoing; nMaxAddnode = connOptions.nMaxAddnode; nMaxFeeler = connOptions.nMaxFeeler; { // Lock cs_main to prevent a potential race with the peer validation // logic thread. 
LOCK(::cs_main); m_max_outbound_full_relay = std::min(connOptions.m_max_outbound_full_relay, connOptions.nMaxConnections); m_max_outbound_block_relay = connOptions.m_max_outbound_block_relay; m_max_outbound = m_max_outbound_full_relay + m_max_outbound_block_relay + nMaxFeeler; } nBestHeight = connOptions.nBestHeight; clientInterface = connOptions.uiInterface; m_banman = connOptions.m_banman; m_msgproc = connOptions.m_msgproc; nSendBufferMaxSize = connOptions.nSendBufferMaxSize; nReceiveFloodSize = connOptions.nReceiveFloodSize; m_peer_connect_timeout = connOptions.m_peer_connect_timeout; { LOCK(cs_totalBytesSent); nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe; nMaxOutboundLimit = connOptions.nMaxOutboundLimit; } vWhitelistedRange = connOptions.vWhitelistedRange; { LOCK(cs_vAddedNodes); vAddedNodes = connOptions.m_added_nodes; } } CConnman(const Config &configIn, uint64_t seed0, uint64_t seed1); ~CConnman(); bool Start(CScheduler &scheduler, const Options &options); void StopThreads(); void StopNodes(); void Stop() { StopThreads(); StopNodes(); }; void Interrupt(); bool GetNetworkActive() const { return fNetworkActive; }; bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; }; void SetNetworkActive(bool active); void OpenNetworkConnection(const CAddress &addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, ConnectionType conn_type = ConnectionType::OUTBOUND); bool CheckIncomingNonce(uint64_t nonce); bool ForNode(NodeId id, std::function func); void PushMessage(CNode *pnode, CSerializedNetMsg &&msg); template void ForEachNode(Callable &&func) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNode(Callable &&func) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { func(node); } } }; template void ForEachNodeThen(Callable &&pre, CallableAfter &&post) { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; template void ForEachNodeThen(Callable &&pre, CallableAfter &&post) const { LOCK(cs_vNodes); for (auto &&node : vNodes) { if (NodeFullyConnected(node)) { pre(node); } } post(); }; // Addrman functions void SetServices(const CService &addr, ServiceFlags nServices); void MarkAddressGood(const CAddress &addr); void AddNewAddresses(const std::vector &vAddr, const CAddress &addrFrom, int64_t nTimePenalty = 0); std::vector GetAddresses(); // This allows temporarily exceeding m_max_outbound_full_relay, with the // goal of finding a peer that is better than all our current peers. void SetTryNewOutboundPeer(bool flag); bool GetTryNewOutboundPeer(); // Return the number of outbound peers we have in excess of our target (eg, // if we previously called SetTryNewOutboundPeer(true), and have since set // to false, we may have extra peers that we wish to disconnect). This may // return a value less than (num_outbound_connections - num_outbound_slots) // in cases where some outbound connections are not yet fully connected, or // not yet fully disconnected. int GetExtraOutboundCount(); bool AddNode(const std::string &node); bool RemoveAddedNode(const std::string &node); std::vector GetAddedNodeInfo(); size_t GetNodeCount(NumConnections num); void GetNodeStats(std::vector &vstats); bool DisconnectNode(const std::string &node); bool DisconnectNode(const CSubNet &subnet); bool DisconnectNode(const CNetAddr &addr); bool DisconnectNode(NodeId id); //! 
Used to convey which local services we are offering peers during node //! connection. //! //! The data returned by this is used in CNode construction, //! which is used to advertise which services we are offering //! that peer during `net_processing.cpp:PushNodeVersion()`. ServiceFlags GetLocalServices() const; //! set the max outbound target in bytes. void SetMaxOutboundTarget(uint64_t limit); uint64_t GetMaxOutboundTarget(); //! set the timeframe for the max outbound target. void SetMaxOutboundTimeframe(uint64_t timeframe); uint64_t GetMaxOutboundTimeframe(); //! check if the outbound target is reached. If param //! historicalBlockServingLimit is set true, the function will response true //! if the limit for serving historical blocks has been reached. bool OutboundTargetReached(bool historicalBlockServingLimit); //! response the bytes left in the current max outbound cycle in case of no //! limit, it will always response 0 uint64_t GetOutboundTargetBytesLeft(); //! response the time in second left in the current max outbound cycle in //! case of no limit, it will always response 0 uint64_t GetMaxOutboundTimeLeftInCycle(); uint64_t GetTotalBytesRecv(); uint64_t GetTotalBytesSent(); void SetBestHeight(int height); int GetBestHeight() const; /** Get a unique deterministic randomizer. */ CSipHasher GetDeterministicRandomizer(uint64_t id) const; unsigned int GetReceiveFloodSize() const; void WakeMessageHandler(); /** * Attempts to obfuscate tx time through exponentially distributed emitting. * Works assuming that a single interval is used. * Variable intervals will result in privacy decrease. */ int64_t PoissonNextSendInbound(int64_t now, int average_interval_seconds); void SetAsmap(std::vector asmap) { addrman.m_asmap = std::move(asmap); } private: struct ListenSocket { public: SOCKET socket; inline void AddSocketPermissionFlags(NetPermissionFlags &flags) const { NetPermissions::AddFlag(flags, m_permissions); } ListenSocket(SOCKET socket_, NetPermissionFlags permissions_) : socket(socket_), m_permissions(permissions_) {} private: NetPermissionFlags m_permissions; }; bool BindListenPort(const CService &bindAddr, bilingual_str &strError, NetPermissionFlags permissions); bool Bind(const CService &addr, unsigned int flags, NetPermissionFlags permissions); bool InitBinds(const std::vector &binds, const std::vector &whiteBinds); void ThreadOpenAddedConnections(); void AddAddrFetch(const std::string &strDest); void ProcessAddrFetch(); void ThreadOpenConnections(std::vector connect); void ThreadMessageHandler(); void AcceptConnection(const ListenSocket &hListenSocket); void DisconnectNodes(); void NotifyNumConnectionsChanged(); void InactivityCheck(CNode *pnode); bool GenerateSelectSet(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketEvents(std::set &recv_set, std::set &send_set, std::set &error_set); void SocketHandler(); void ThreadSocketHandler(); void ThreadDNSAddressSeed(); uint64_t CalculateKeyedNetGroup(const CAddress &ad) const; CNode *FindNode(const CNetAddr &ip); CNode *FindNode(const CSubNet &subNet); CNode *FindNode(const std::string &addrName); CNode *FindNode(const CService &addr); bool AttemptToEvictConnection(); CNode *ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type); void AddWhitelistPermissionFlags(NetPermissionFlags &flags, const CNetAddr &addr) const; void DeleteNode(CNode *pnode); NodeId GetNewNodeId(); size_t SocketSendData(CNode *pnode) const; void DumpAddresses(); // Network stats void 
RecordBytesRecv(uint64_t bytes); void RecordBytesSent(uint64_t bytes); // Whether the node should be passed out in ForEach* callbacks static bool NodeFullyConnected(const CNode *pnode); const Config *config; // Network usage totals RecursiveMutex cs_totalBytesRecv; RecursiveMutex cs_totalBytesSent; uint64_t nTotalBytesRecv GUARDED_BY(cs_totalBytesRecv){0}; uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytesSent){0}; // outbound limit & stats uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundCycleStartTime GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundLimit GUARDED_BY(cs_totalBytesSent); uint64_t nMaxOutboundTimeframe GUARDED_BY(cs_totalBytesSent); // P2P timeout in seconds int64_t m_peer_connect_timeout; // Whitelisted ranges. Any node connecting from these is automatically // whitelisted (as well as those connecting to whitelisted binds). std::vector vWhitelistedRange; unsigned int nSendBufferMaxSize{0}; unsigned int nReceiveFloodSize{0}; std::vector vhListenSocket; std::atomic fNetworkActive{true}; bool fAddressesInitialized{false}; CAddrMan addrman; std::deque m_addr_fetches GUARDED_BY(m_addr_fetches_mutex); RecursiveMutex m_addr_fetches_mutex; std::vector vAddedNodes GUARDED_BY(cs_vAddedNodes); RecursiveMutex cs_vAddedNodes; std::vector vNodes GUARDED_BY(cs_vNodes); std::list vNodesDisconnected; mutable RecursiveMutex cs_vNodes; std::atomic nLastNodeId{0}; unsigned int nPrevNodeCount{0}; /** * Services this instance offers. * * This data is replicated in each CNode instance we create during peer * connection (in ConnectNode()) under a member also called * nLocalServices. * * This data is not marked const, but after being set it should not * change. See the note in CNode::nLocalServices documentation. * * \sa CNode::nLocalServices */ ServiceFlags nLocalServices; std::unique_ptr semOutbound; std::unique_ptr semAddnode; int nMaxConnections; // How many full-relay (tx, block, addr) outbound peers we want int m_max_outbound_full_relay; // How many block-relay only outbound peers we want // We do not relay tx or addr messages with these peers int m_max_outbound_block_relay; int nMaxAddnode; int nMaxFeeler; int m_max_outbound; bool m_use_addrman_outgoing; std::atomic nBestHeight; CClientUIInterface *clientInterface; NetEventsInterface *m_msgproc; /** * Pointer to this node's banman. May be nullptr - check existence before * dereferencing. */ BanMan *m_banman; /** SipHasher seeds for deterministic randomness */ const uint64_t nSeed0, nSeed1; /** flag for waking the message processor. */ bool fMsgProcWake GUARDED_BY(mutexMsgProc); std::condition_variable condMsgProc; Mutex mutexMsgProc; std::atomic flagInterruptMsgProc{false}; CThreadInterrupt interruptNet; std::thread threadDNSAddressSeed; std::thread threadSocketHandler; std::thread threadOpenAddedConnections; std::thread threadOpenConnections; std::thread threadMessageHandler; /** * flag for deciding to connect to an extra outbound peer, in excess of * m_max_outbound_full_relay. This takes the place of a feeler connection. 
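 *
 * Taken together with SetTryNewOutboundPeer() and GetExtraOutboundCount()
 * declared earlier: setting the flag presumably lets the connection-opening
 * logic dial one peer beyond m_max_outbound_full_relay (using the feeler
 * slot), and once the flag is cleared GetExtraOutboundCount() reports the
 * surplus so the caller can pick a peer to disconnect.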
*/ std::atomic_bool m_try_another_outbound_peer; std::atomic m_next_send_inv_to_incoming{0}; friend struct ::CConnmanTest; friend struct ConnmanTestMsg; }; void Discover(); void StartMapPort(); void InterruptMapPort(); void StopMapPort(); unsigned short GetListenPort(); /** * Interface for message handling */ class NetEventsInterface { public: virtual bool ProcessMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual bool SendMessages(const Config &config, CNode *pnode, std::atomic &interrupt) = 0; virtual void InitializeNode(const Config &config, CNode *pnode) = 0; virtual void FinalizeNode(const Config &config, NodeId id, bool &update_connection_time) = 0; protected: /** * Protected destructor so that instances can only be deleted by derived * classes. If that restriction is no longer desired, this should be made * public and virtual. */ ~NetEventsInterface() = default; }; enum { // unknown LOCAL_NONE, // address a local interface listens on LOCAL_IF, // address explicit bound to LOCAL_BIND, // address reported by UPnP LOCAL_UPNP, // address explicitly specified (-externalip=) LOCAL_MANUAL, LOCAL_MAX }; bool IsPeerAddrLocalGood(CNode *pnode); void AdvertiseLocal(CNode *pnode); /** * Mark a network as reachable or unreachable (no automatic connects to it) * @note Networks are reachable by default */ void SetReachable(enum Network net, bool reachable); /** @returns true if the network is reachable, false otherwise */ bool IsReachable(enum Network net); /** @returns true if the address is in a reachable network, false otherwise */ bool IsReachable(const CNetAddr &addr); bool AddLocal(const CService &addr, int nScore = LOCAL_NONE); bool AddLocal(const CNetAddr &addr, int nScore = LOCAL_NONE); void RemoveLocal(const CService &addr); bool SeenLocal(const CService &addr); bool IsLocal(const CService &addr); bool GetLocal(CService &addr, const CNetAddr *paddrPeer = nullptr); CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices); extern bool fDiscover; extern bool fListen; extern bool g_relay_txes; struct LocalServiceInfo { int nScore; int nPort; }; extern RecursiveMutex cs_mapLocalHost; extern std::map mapLocalHost GUARDED_BY(cs_mapLocalHost); extern const std::string NET_MESSAGE_COMMAND_OTHER; // Command, total bytes typedef std::map mapMsgCmdSize; /** * POD that contains various stats about a node. * Usually constructed from CConman::GetNodeStats. Stats are filled from the * node using CNode::copyStats. */ struct CNodeStats { NodeId nodeid; ServiceFlags nServices; bool fRelayTxes; int64_t nLastSend; int64_t nLastRecv; int64_t nTimeConnected; int64_t nTimeOffset; std::string addrName; int nVersion; std::string cleanSubVer; bool fInbound; bool m_manual_connection; int nStartingHeight; uint64_t nSendBytes; mapMsgCmdSize mapSendBytesPerMsgCmd; uint64_t nRecvBytes; mapMsgCmdSize mapRecvBytesPerMsgCmd; NetPermissionFlags m_permissionFlags; bool m_legacyWhitelisted; int64_t m_ping_usec; int64_t m_ping_wait_usec; int64_t m_min_ping_usec; Amount minFeeFilter; // Our address, as reported by the peer std::string addrLocal; // Address of this peer CAddress addr; // Bind address of our side of the connection CAddress addrBind; uint32_t m_mapped_as; }; /** * Transport protocol agnostic message container. * Ideally it should only contain receive time, payload, * command and size. */ class CNetMessage { public: //! received message data CDataStream m_recv; //! 
time of message receipt std::chrono::microseconds m_time{0}; bool m_valid_netmagic = false; bool m_valid_header = false; bool m_valid_checksum = false; //! size of the payload uint32_t m_message_size{0}; //! used wire size of the message (including header/checksum) uint32_t m_raw_message_size{0}; std::string m_command; CNetMessage(CDataStream &&recv_in) : m_recv(std::move(recv_in)) {} void SetVersion(int nVersionIn) { m_recv.SetVersion(nVersionIn); } }; /** * The TransportDeserializer takes care of holding and deserializing the * network receive buffer. It can deserialize the network buffer into a * transport protocol agnostic CNetMessage (command & payload) */ class TransportDeserializer { public: // returns true if the current deserialization is complete virtual bool Complete() const = 0; // set the serialization context version virtual void SetVersion(int version) = 0; // read and deserialize data virtual int Read(const Config &config, const char *data, uint32_t bytes) = 0; // decomposes a message from the context virtual CNetMessage GetMessage(const Config &config, std::chrono::microseconds time) = 0; virtual ~TransportDeserializer() {} }; class V1TransportDeserializer final : public TransportDeserializer { private: mutable CHash256 hasher; mutable uint256 data_hash; // Parsing header (false) or data (true) bool in_data; // Partially received header. CDataStream hdrbuf; // Complete header. CMessageHeader hdr; // Received message data. CDataStream vRecv; uint32_t nHdrPos; uint32_t nDataPos; const uint256 &GetMessageHash() const; int readHeader(const Config &config, const char *pch, uint32_t nBytes); int readData(const char *pch, uint32_t nBytes); void Reset() { vRecv.clear(); hdrbuf.clear(); hdrbuf.resize(24); in_data = false; nHdrPos = 0; nDataPos = 0; data_hash.SetNull(); hasher.Reset(); } public: V1TransportDeserializer( const CMessageHeader::MessageMagic &pchMessageStartIn, int nTypeIn, int nVersionIn) : hdrbuf(nTypeIn, nVersionIn), hdr(pchMessageStartIn), vRecv(nTypeIn, nVersionIn) { Reset(); } bool Complete() const override { if (!in_data) { return false; } return (hdr.nMessageSize == nDataPos); } void SetVersion(int nVersionIn) override { hdrbuf.SetVersion(nVersionIn); vRecv.SetVersion(nVersionIn); } int Read(const Config &config, const char *pch, uint32_t nBytes) override { int ret = in_data ? readData(pch, nBytes) : readHeader(config, pch, nBytes); if (ret < 0) { Reset(); } return ret; } CNetMessage GetMessage(const Config &config, std::chrono::microseconds time) override; }; /** * The TransportSerializer prepares messages for the network transport */ class TransportSerializer { public: // prepare message for transport (header construction, error-correction // computation, payload encryption, etc.) virtual void prepareForTransport(const Config &config, CSerializedNetMsg &msg, std::vector &header) = 0; virtual ~TransportSerializer() {} }; class V1TransportSerializer : public TransportSerializer { public: void prepareForTransport(const Config &config, CSerializedNetMsg &msg, std::vector &header) override; }; /** Information about a peer */ class CNode { friend class CConnman; friend struct ConnmanTestMsg; public: std::unique_ptr m_deserializer; std::unique_ptr m_serializer; // socket std::atomic nServices{NODE_NONE}; SOCKET hSocket GUARDED_BY(cs_hSocket); // Total size of all vSendMsg entries. size_t nSendSize{0}; // Offset inside the first vSendMsg already sent. 
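// A minimal sketch of driving the TransportDeserializer interface declared
// above; not part of this diff. In this codebase the real driver is
// CNode::ReceiveMsgBytes() (declared further down).
void FeedBytesSketch(const Config &config, TransportDeserializer &deser,
                     const char *data, uint32_t nBytes) {
    while (nBytes > 0) {
        // Read() consumes header bytes first, then payload bytes, and returns
        // how many it took; a negative value means a bad header and the
        // deserializer has already reset itself.
        const int handled = deser.Read(config, data, nBytes);
        if (handled < 0) {
            return;
        }
        data += handled;
        nBytes -= handled;
        if (deser.Complete()) {
            // A complete header + payload is buffered; decompose it.
            const auto recv_time = std::chrono::microseconds{0}; // placeholder
            CNetMessage msg = deser.GetMessage(config, recv_time);
            (void)msg; // hand off to the message handler in real code
        }
    }
}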
size_t nSendOffset{0}; uint64_t nSendBytes GUARDED_BY(cs_vSend){0}; std::deque> vSendMsg GUARDED_BY(cs_vSend); RecursiveMutex cs_vSend; RecursiveMutex cs_hSocket; RecursiveMutex cs_vRecv; RecursiveMutex cs_vProcessMsg; std::list vProcessMsg GUARDED_BY(cs_vProcessMsg); size_t nProcessQueueSize{0}; RecursiveMutex cs_sendProcessing; std::deque vRecvGetData; uint64_t nRecvBytes GUARDED_BY(cs_vRecv){0}; std::atomic nLastSend{0}; std::atomic nLastRecv{0}; const int64_t nTimeConnected; std::atomic nTimeOffset{0}; // Address of this peer const CAddress addr; // Bind address of our side of the connection const CAddress addrBind; std::atomic nVersion{0}; // The nonce provided by the remote host. uint64_t nRemoteHostNonce{0}; // The extra entropy provided by the remote host. uint64_t nRemoteExtraEntropy{0}; /** * cleanSubVer is a sanitized string of the user agent byte array we read * from the wire. This cleaned string can safely be logged or displayed. */ RecursiveMutex cs_SubVer; std::string cleanSubVer GUARDED_BY(cs_SubVer){}; // This peer is preferred for eviction. bool m_prefer_evict{false}; bool HasPermission(NetPermissionFlags permission) const { return NetPermissions::HasFlag(m_permissionFlags, permission); } // This boolean is unusued in actual processing, only present for backward // compatibility at RPC/QT level bool m_legacyWhitelisted{false}; // set by version message bool fClient{false}; // after BIP159, set by version message bool m_limited_node{false}; + /** + * Whether the peer has signaled support for receiving ADDRv2 (BIP155) + * messages, implying a preference to receive ADDRv2 instead of ADDR ones. + */ + std::atomic_bool m_wants_addrv2{false}; std::atomic_bool fSuccessfullyConnected{false}; // Setting fDisconnect to true will cause the node to be disconnected the // next time DisconnectNodes() runs std::atomic_bool fDisconnect{false}; bool fSentAddr{false}; CSemaphoreGrant grantOutbound; std::atomic nRefCount{0}; const uint64_t nKeyedNetGroup; std::atomic_bool fPauseRecv{false}; std::atomic_bool fPauseSend{false}; bool IsOutboundOrBlockRelayConn() const { switch (m_conn_type) { case ConnectionType::OUTBOUND: case ConnectionType::BLOCK_RELAY: return true; case ConnectionType::INBOUND: case ConnectionType::MANUAL: case ConnectionType::ADDR_FETCH: case ConnectionType::FEELER: return false; } assert(false); } bool IsFullOutboundConn() const { return m_conn_type == ConnectionType::OUTBOUND; } bool IsManualConn() const { return m_conn_type == ConnectionType::MANUAL; } bool IsBlockOnlyConn() const { return m_conn_type == ConnectionType::BLOCK_RELAY; } bool IsFeelerConn() const { return m_conn_type == ConnectionType::FEELER; } bool IsAddrFetchConn() const { return m_conn_type == ConnectionType::ADDR_FETCH; } bool IsInboundConn() const { return m_conn_type == ConnectionType::INBOUND; } bool ExpectServicesFromConn() const { switch (m_conn_type) { case ConnectionType::INBOUND: case ConnectionType::MANUAL: case ConnectionType::FEELER: return false; case ConnectionType::OUTBOUND: case ConnectionType::BLOCK_RELAY: case ConnectionType::ADDR_FETCH: return true; } assert(false); } protected: mapMsgCmdSize mapSendBytesPerMsgCmd; mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); public: BlockHash hashContinue; std::atomic nStartingHeight{-1}; // flood relay std::vector vAddrToSend; std::unique_ptr m_addr_known = nullptr; bool fGetAddr{false}; std::chrono::microseconds m_next_addr_send GUARDED_BY(cs_sendProcessing){0}; std::chrono::microseconds m_next_local_addr_send 
GUARDED_BY(cs_sendProcessing){0}; bool IsAddrRelayPeer() const { return m_addr_known != nullptr; } // List of block ids we still have to announce. // There is no final sorting before sending, as they are always sent // immediately and in the order requested. std::vector vInventoryBlockToSend GUARDED_BY(cs_inventory); RecursiveMutex cs_inventory; struct TxRelay { mutable RecursiveMutex cs_filter; // We use fRelayTxes for two purposes - // a) it allows us to not relay tx invs before receiving the peer's // version message. // b) the peer may tell us in its version message that we should not // relay tx invs unless it loads a bloom filter. bool fRelayTxes GUARDED_BY(cs_filter){false}; std::unique_ptr pfilter PT_GUARDED_BY(cs_filter) GUARDED_BY(cs_filter){nullptr}; mutable RecursiveMutex cs_tx_inventory; CRollingBloomFilter filterInventoryKnown GUARDED_BY(cs_tx_inventory){ 50000, 0.000001}; // Set of transaction ids we still have to announce. // They are sorted by the mempool before relay, so the order is not // important. std::set setInventoryTxToSend; // Used for BIP35 mempool sending bool fSendMempool GUARDED_BY(cs_tx_inventory){false}; // Last time a "MEMPOOL" request was serviced. std::atomic m_last_mempool_req{ std::chrono::seconds{0}}; std::chrono::microseconds nNextInvSend{0}; RecursiveMutex cs_feeFilter; // Minimum fee rate with which to filter inv's to this node Amount minFeeFilter GUARDED_BY(cs_feeFilter){Amount::zero()}; Amount lastSentFeeFilter{Amount::zero()}; int64_t nextSendTimeFeeFilter{0}; }; // m_tx_relay == nullptr if we're not relaying transactions with this peer std::unique_ptr m_tx_relay; struct AvalancheState { AvalancheState() {} avalanche::Delegation delegation; }; // m_avalanche_state == nullptr if we're not using avalanche with this peer std::unique_ptr m_avalanche_state; // Used for headers announcements - unfiltered blocks to relay std::vector vBlockHashesToAnnounce GUARDED_BY(cs_inventory); // Block and TXN accept times std::atomic nLastBlockTime{0}; std::atomic nLastTXTime{0}; // Ping time measurement: // The pong reply we're expecting, or 0 if no pong expected. std::atomic nPingNonceSent{0}; /** When the last ping was sent, or 0 if no ping was ever sent */ std::atomic m_ping_start{ std::chrono::microseconds{0}}; // Last measured round-trip time. std::atomic nPingUsecTime{0}; // Best measured round-trip time. std::atomic nMinPingUsecTime{std::numeric_limits::max()}; // Whether a ping is requested. std::atomic fPingQueued{false}; std::set orphan_work_set; CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, uint64_t nLocalExtraEntropyIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in); ~CNode(); CNode(const CNode &) = delete; CNode &operator=(const CNode &) = delete; private: const NodeId id; const uint64_t nLocalHostNonce; const uint64_t nLocalExtraEntropy; const ConnectionType m_conn_type; std::atomic m_greatest_common_version{INIT_PROTO_VERSION}; //! Services offered to this peer. //! //! This is supplied by the parent CConnman during peer connection //! (CConnman::ConnectNode()) from its attribute of the same name. //! //! This is const because there is no protocol defined for renegotiating //! services initially offered to a peer. The set of local services we //! offer should not change after initialization. //! //! An interesting example of this is NODE_NETWORK and initial block //! 
download: a node which starts up from scratch doesn't have any blocks //! to serve, but still advertises NODE_NETWORK because it will eventually //! fulfill this role after IBD completes. P2P code is written in such a //! way that it can gracefully handle peers who don't make good on their //! service advertisements. const ServiceFlags nLocalServices; const int nMyStartingHeight; NetPermissionFlags m_permissionFlags{PF_NONE}; // Used only by SocketHandler thread std::list vRecvMsg; mutable RecursiveMutex cs_addrName; std::string addrName GUARDED_BY(cs_addrName); // Our address, as reported by the peer CService addrLocal GUARDED_BY(cs_addrLocal); mutable RecursiveMutex cs_addrLocal; public: NodeId GetId() const { return id; } uint64_t GetLocalNonce() const { return nLocalHostNonce; } uint64_t GetLocalExtraEntropy() const { return nLocalExtraEntropy; } int GetMyStartingHeight() const { return nMyStartingHeight; } int GetRefCount() const { assert(nRefCount >= 0); return nRefCount; } bool ReceiveMsgBytes(const Config &config, const char *pch, uint32_t nBytes, bool &complete); void SetCommonVersion(int greatest_common_version) { m_greatest_common_version = greatest_common_version; } int GetCommonVersion() const { return m_greatest_common_version; } CService GetAddrLocal() const; //! May not be called more than once void SetAddrLocal(const CService &addrLocalIn); CNode *AddRef() { nRefCount++; return this; } void Release() { nRefCount--; } void AddAddressKnown(const CAddress &_addr) { assert(m_addr_known); m_addr_known->insert(_addr.GetKey()); } void PushAddress(const CAddress &_addr, FastRandomContext &insecure_rand) { + // Whether the peer supports the address in `_addr`. For example, + // nodes that do not implement BIP155 cannot receive Tor v3 addresses + // because they require ADDRv2 (BIP155) encoding. + const bool addr_format_supported = + m_wants_addrv2 || _addr.IsAddrV1Compatible(); + // Known checking here is only to save space from duplicates. // SendMessages will filter it again for knowns that were added // after addresses were pushed. assert(m_addr_known); - if (_addr.IsValid() && !m_addr_known->contains(_addr.GetKey())) { + if (_addr.IsValid() && !m_addr_known->contains(_addr.GetKey()) && + addr_format_supported) { if (vAddrToSend.size() >= MAX_ADDR_TO_SEND) { vAddrToSend[insecure_rand.randrange(vAddrToSend.size())] = _addr; } else { vAddrToSend.push_back(_addr); } } } void AddKnownTx(const TxId &txid) { if (m_tx_relay != nullptr) { LOCK(m_tx_relay->cs_tx_inventory); m_tx_relay->filterInventoryKnown.insert(txid); } } void PushTxInventory(const TxId &txid) { if (m_tx_relay == nullptr) { return; } LOCK(m_tx_relay->cs_tx_inventory); if (!m_tx_relay->filterInventoryKnown.contains(txid)) { m_tx_relay->setInventoryTxToSend.insert(txid); } } void PushBlockInventory(const BlockHash &blockhash) { LOCK(cs_inventory); vInventoryBlockToSend.push_back(blockhash); } void PushBlockHash(const BlockHash &hash) { LOCK(cs_inventory); vBlockHashesToAnnounce.push_back(hash); } void CloseSocketDisconnect(); void copyStats(CNodeStats &stats, const std::vector &m_asmap); ServiceFlags GetLocalServices() const { return nLocalServices; } std::string GetAddrName() const; //! Sets the addrName only if it was not previously set void MaybeSetAddrName(const std::string &addrNameIn); }; /** * Return a timestamp in the future (in microseconds) for exponentially * distributed events. 
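// The gate added to PushAddress() above can be read as one predicate
// (CanRelayAddressTo is a hypothetical helper, not part of this diff): a peer
// that signaled BIP155 support receives every valid address, while a legacy
// peer only receives addresses that fit the pre-BIP155 ADDR encoding, so Tor
// v3 addresses, for example, are simply withheld from it.
static bool CanRelayAddressTo(const CNode &peer, const CAddress &addr) {
    return addr.IsValid() &&
           (peer.m_wants_addrv2 || addr.IsAddrV1Compatible());
}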
*/ int64_t PoissonNextSend(int64_t now, int average_interval_seconds); /** Wrapper to return mockable type */ inline std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval) { return std::chrono::microseconds{ PoissonNextSend(now.count(), average_interval.count())}; } std::string getSubVersionEB(uint64_t MaxBlockSize); std::string userAgent(const Config &config); #endif // BITCOIN_NET_H diff --git a/src/net_processing.cpp b/src/net_processing.cpp index bd731b3e1..d32bc847c 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1,5526 +1,5556 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include // For NDEBUG compile time check #include #include #include #include #include /** Expiration time for orphan transactions in seconds */ static constexpr int64_t ORPHAN_TX_EXPIRE_TIME = 20 * 60; /** Minimum time between orphan transactions expire time checks in seconds */ static constexpr int64_t ORPHAN_TX_EXPIRE_INTERVAL = 5 * 60; /** How long to cache transactions in mapRelay for normal relay */ static constexpr std::chrono::seconds RELAY_TX_CACHE_TIME{15 * 60}; /** * Headers download timeout expressed in microseconds. * Timeout = base + per_header * (expected number of headers) */ // 15 minutes static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_BASE = 15 * 60 * 1000000; // 1ms/header static constexpr int64_t HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1000; /** * Protect at least this many outbound peers from disconnection due to * slow/behind headers chain. */ static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4; /** * Timeout for (unprotected) outbound peers to sync to our chainwork, in * seconds. */ // 20 minutes static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; /** How frequently to check for stale tips, in seconds */ // 10 minutes static constexpr int64_t STALE_CHECK_INTERVAL = 10 * 60; /** * How frequently to check for extra outbound peers and disconnect, in seconds. */ static constexpr int64_t EXTRA_PEER_CHECK_INTERVAL = 45; /** * Minimum time an outbound-peer-eviction candidate must be connected for, in * order to evict, in seconds. */ static constexpr int64_t MINIMUM_CONNECT_TIME = 30; /** SHA256("main address relay")[0:8] */ static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; /// Age after which a stale block will no longer be served if requested as /// protection against fingerprinting. Set to one month, denominated in seconds. static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60; /// Age after which a block is considered historical for purposes of rate /// limiting block relay. Set to one week, denominated in seconds. static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60; /** * Time between pings automatically sent out for latency probing and keepalive. 
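// PoissonNextSend(), declared in net.h above, returns exponentially
// distributed send times so that relay timing leaks less information. A
// minimal sketch of the same idea using <random> (illustration only, not this
// codebase's implementation):
#include <cstdint>
#include <random>

int64_t PoissonNextSendSketch(int64_t now_usec, int average_interval_seconds,
                              std::mt19937_64 &rng) {
    // Exponentially distributed delay with mean average_interval_seconds.
    std::exponential_distribution<double> delay(1.0 / average_interval_seconds);
    return now_usec + int64_t(delay(rng) * 1e6);
}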
*/ static constexpr std::chrono::minutes PING_INTERVAL{2}; /** The maximum number of entries in a locator */ static const unsigned int MAX_LOCATOR_SZ = 101; /** The maximum number of entries in an 'inv' protocol message */ static const unsigned int MAX_INV_SZ = 50000; static_assert(MAX_PROTOCOL_MESSAGE_LENGTH > MAX_INV_SZ * sizeof(CInv), "Max protocol message length must be greater than largest " "possible INV message"); /** Maximum number of in-flight transactions from a peer */ static constexpr int32_t MAX_PEER_TX_IN_FLIGHT = 100; /** Maximum number of announced transactions from a peer */ static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 2 * MAX_INV_SZ; /** How many microseconds to delay requesting transactions from inbound peers */ static constexpr std::chrono::microseconds INBOUND_PEER_TX_DELAY{ std::chrono::seconds{2}}; /** * How long to wait (in microseconds) before downloading a transaction from an * additional peer. */ static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{ std::chrono::seconds{60}}; /** * Maximum delay (in microseconds) for transaction requests to avoid biasing * some peers over others. */ static constexpr std::chrono::microseconds MAX_GETDATA_RANDOM_DELAY{ std::chrono::seconds{2}}; /** * How long to wait (in microseconds) before expiring an in-flight getdata * request to a peer. */ static constexpr std::chrono::microseconds TX_EXPIRY_INTERVAL{ GETDATA_TX_INTERVAL * 10}; static_assert(INBOUND_PEER_TX_DELAY >= MAX_GETDATA_RANDOM_DELAY, "To preserve security, MAX_GETDATA_RANDOM_DELAY should not " "exceed INBOUND_PEER_DELAY"); /** * Limit to avoid sending big packets. Not used in processing incoming GETDATA * for compatibility. */ static const unsigned int MAX_GETDATA_SZ = 1000; /** * Number of blocks that can be requested at any given time from a single peer. */ static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16; /** * Timeout in seconds during which a peer must stall block download progress * before being disconnected. */ static const unsigned int BLOCK_STALLING_TIMEOUT = 2; /** * Number of headers sent in one getheaders result. We rely on the assumption * that if a peer sends * less than this number, we reached its tip. Changing this value is a protocol * upgrade. */ static const unsigned int MAX_HEADERS_RESULTS = 2000; /** * Maximum depth of blocks we're willing to serve as compact blocks to peers * when requested. For older blocks, a regular BLOCK response will be sent. */ static const int MAX_CMPCTBLOCK_DEPTH = 5; /** * Maximum depth of blocks we're willing to respond to GETBLOCKTXN requests * for. */ static const int MAX_BLOCKTXN_DEPTH = 10; /** * Size of the "block download window": how far ahead of our current height do * we fetch? Larger windows tolerate larger download speed differences between * peer, but increase the potential degree of disordering of blocks on disk * (which make reindexing and pruning harder). We'll probably * want to make this a per-peer adaptive value at some point. */ static const unsigned int BLOCK_DOWNLOAD_WINDOW = 1024; /** * Block download timeout base, expressed in millionths of the block interval * (i.e. 10 min) */ static const int64_t BLOCK_DOWNLOAD_TIMEOUT_BASE = 1000000; /** * Additional block download timeout per parallel downloading peer (i.e. 5 min) */ static const int64_t BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 500000; /** * Maximum number of headers to announce when relaying blocks with headers * message. 
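// A worked reading of the two BLOCK_DOWNLOAD_TIMEOUT constants above: both are
// expressed in millionths of the block interval, so with the 10-minute target
// spacing the base timeout corresponds to one full interval (10 min) and each
// parallel downloading peer adds half an interval (5 min); with two such peers
// the timeout is therefore on the order of 20 minutes.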
*/ static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8; /** Maximum number of unconnecting headers announcements before DoS score */ static const int MAX_UNCONNECTING_HEADERS = 10; /** Minimum blocks required to signal NODE_NETWORK_LIMITED */ static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288; /** * Average delay between local address broadcasts. */ static constexpr std::chrono::hours AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24}; /** * Average delay between peer address broadcasts. */ static const std::chrono::seconds AVG_ADDRESS_BROADCAST_INTERVAL{30}; /** * Average delay between trickled inventory transmissions in seconds. * Blocks and whitelisted receivers bypass this, outbound peers get half this * delay. */ static const unsigned int INVENTORY_BROADCAST_INTERVAL = 5; /** * Maximum number of inventory items to send per transmission. * Limits the impact of low-fee transaction floods. */ static constexpr unsigned int INVENTORY_BROADCAST_MAX_PER_MB = 7 * INVENTORY_BROADCAST_INTERVAL; /** * Average delay between feefilter broadcasts in seconds. */ static constexpr unsigned int AVG_FEEFILTER_BROADCAST_INTERVAL = 10 * 60; /** * Maximum feefilter broadcast delay after significant change. */ static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60; /** * Maximum number of compact filters that may be requested with one * getcfilters. See BIP 157. */ static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000; /** * Maximum number of cf hashes that may be requested with one getcfheaders. See * BIP 157. */ static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000; /// How many non standard orphan do we consider from a node before ignoring it. static constexpr uint32_t MAX_NON_STANDARD_ORPHAN_PER_NODE = 5; struct COrphanTx { // When modifying, adapt the copy of this definition in tests/DoS_tests. CTransactionRef tx; NodeId fromPeer; int64_t nTimeExpire; size_t list_pos; }; RecursiveMutex g_cs_orphans; std::map mapOrphanTransactions GUARDED_BY(g_cs_orphans); void EraseOrphansFor(NodeId peer); // Internal stuff namespace { /** Number of nodes with fSyncStarted. */ int nSyncStarted GUARDED_BY(cs_main) = 0; /** * Sources of received blocks, saved to be able to punish them when processing * happens afterwards. * Set mapBlockSource[hash].second to false if the node should not be punished * if the block is invalid. */ std::map> mapBlockSource GUARDED_BY(cs_main); /** * Filter for transactions that were recently rejected by AcceptToMemoryPool. * These are not rerequested until the chain tip changes, at which point the * entire filter is reset. * * Without this filter we'd be re-requesting txs from each of our peers, * increasing bandwidth consumption considerably. For instance, with 100 peers, * half of which relay a tx we don't accept, that might be a 50x bandwidth * increase. A flooding attacker attempting to roll-over the filter using * minimum-sized, 60byte, transactions might manage to send 1000/sec if we have * fast peers, so we pick 120,000 to give our peers a two minute window to send * invs to us. * * Decreasing the false positive rate is fairly cheap, so we pick one in a * million to make it highly unlikely for users to have issues with this filter. * * Memory used: 1.3 MB */ std::unique_ptr recentRejects GUARDED_BY(cs_main); uint256 hashRecentRejectsChainTip GUARDED_BY(cs_main); /** * Filter for transactions that have been recently confirmed. * We use this to avoid requesting transactions that have already been * confirmed. 
*/ RecursiveMutex g_cs_recent_confirmed_transactions; std::unique_ptr g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions); /** * Blocks that are in flight, and that are in the queue to be downloaded. */ struct QueuedBlock { BlockHash hash; //! Optional. const CBlockIndex *pindex; //! Whether this block has validated headers at the time of request. bool fValidatedHeaders; //! Optional, used for CMPCTBLOCK downloads std::unique_ptr partialBlock; }; std::map::iterator>> mapBlocksInFlight GUARDED_BY(cs_main); /** Stack of nodes which we have set to announce using compact blocks */ std::list lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); /** Number of preferable block download peers. */ int nPreferredDownload GUARDED_BY(cs_main) = 0; /** Number of peers from which we're downloading blocks. */ int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; /** Number of outbound peers with m_chain_sync.m_protect. */ int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; /** When our tip was last updated. */ std::atomic g_last_tip_update(0); /** Relay map. */ typedef std::map MapRelay; MapRelay mapRelay GUARDED_BY(cs_main); /** * Expiration-time ordered list of (expire time, relay map entry) pairs, * protected by cs_main). */ std::deque> vRelayExpiration GUARDED_BY(cs_main); struct IteratorComparator { template bool operator()(const I &a, const I &b) const { return &(*a) < &(*b); } }; std::map::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(g_cs_orphans); //! For random eviction std::vector::iterator> g_orphan_list GUARDED_BY(g_cs_orphans); static size_t vExtraTxnForCompactIt GUARDED_BY(g_cs_orphans) = 0; static std::vector> vExtraTxnForCompact GUARDED_BY(g_cs_orphans); } // namespace namespace { /** * Maintain validation-specific state about nodes, protected by cs_main, instead * by CNode's own locks. This simplifies asynchronous operation, where * processing of incoming data is done after the ProcessMessage call returns, * and we're no longer holding the node's locks. */ struct CNodeState { //! The peer's address const CService address; //! Whether we have a fully established connection. bool fCurrentlyConnected; //! The best known block we know this peer has announced. const CBlockIndex *pindexBestKnownBlock; //! The hash of the last unknown block this peer has announced. BlockHash hashLastUnknownBlock; //! The last full block we both have. const CBlockIndex *pindexLastCommonBlock; //! The best header we have sent our peer. const CBlockIndex *pindexBestHeaderSent; //! Length of current-streak of unconnecting headers announcements int nUnconnectingHeaders; //! Whether we've started headers synchronization with this peer. bool fSyncStarted; //! When to potentially disconnect peer for stalling headers download int64_t nHeadersSyncTimeout; //! Since when we're stalling block download progress (in microseconds), or //! 0. int64_t nStallingSince; std::list vBlocksInFlight; //! When the first entry in vBlocksInFlight started downloading. Don't care //! when vBlocksInFlight is empty. int64_t nDownloadingSince; int nBlocksInFlight; int nBlocksInFlightValidHeaders; //! Whether we consider this a preferred download peer. bool fPreferredDownload; //! Whether this peer wants invs or headers (when possible) for block //! announcements. bool fPreferHeaders; //! Whether this peer wants invs or cmpctblocks (when possible) for block //! announcements. bool fPreferHeaderAndIDs; /** * Whether this peer will send us cmpctblocks if we request them. 
* This is not used to gate request logic, as we really only care about * fSupportsDesiredCmpctVersion, but is used as a flag to "lock in" the * version of compact blocks we send. */ bool fProvidesHeaderAndIDs; /** * If we've announced NODE_WITNESS to this peer: whether the peer sends * witnesses in cmpctblocks/blocktxns, otherwise: whether this peer sends * non-witnesses in cmpctblocks/blocktxns. */ bool fSupportsDesiredCmpctVersion; /** * State used to enforce CHAIN_SYNC_TIMEOUT * Only in effect for outbound, non-manual, full-relay connections, with * m_protect == false * Algorithm: if a peer's best known block has less work than our tip, set * a timeout CHAIN_SYNC_TIMEOUT seconds in the future: * - If at timeout their best known block now has more work than our tip * when the timeout was set, then either reset the timeout or clear it * (after comparing against our current tip's work) * - If at timeout their best known block still has less work than our tip * did when the timeout was set, then send a getheaders message, and set a * shorter timeout, HEADERS_RESPONSE_TIME seconds in future. If their best * known block is still behind when that new timeout is reached, disconnect. */ struct ChainSyncTimeoutState { //! A timeout used for checking whether our peer has sufficiently //! synced. int64_t m_timeout; //! A header with the work we require on our peer's chain. const CBlockIndex *m_work_header; //! After timeout is reached, set to true after sending getheaders. bool m_sent_getheaders; //! Whether this peer is protected from disconnection due to a bad/slow //! chain. bool m_protect; }; ChainSyncTimeoutState m_chain_sync; //! Time of last new block announcement int64_t m_last_block_announcement; /* * State associated with transaction download. * * Tx download algorithm: * * When inv comes in, queue up (process_time, txid) inside the peer's * CNodeState (m_tx_process_time) as long as m_tx_announced for the peer * isn't too big (MAX_PEER_TX_ANNOUNCEMENTS). * * The process_time for a transaction is set to nNow for outbound peers, * nNow + 2 seconds for inbound peers. This is the time at which we'll * consider trying to request the transaction from the peer in * SendMessages(). The delay for inbound peers is to allow outbound peers * a chance to announce before we request from inbound peers, to prevent * an adversary from using inbound connections to blind us to a * transaction (InvBlock). * * When we call SendMessages() for a given peer, * we will loop over the transactions in m_tx_process_time, looking * at the transactions whose process_time <= nNow. We'll request each * such transaction that we don't have already and that hasn't been * requested from another peer recently, up until we hit the * MAX_PEER_TX_IN_FLIGHT limit for the peer. Then we'll update * g_already_asked_for for each requested txid, storing the time of the * GETDATA request. We use g_already_asked_for to coordinate transaction * requests amongst our peers. * * For transactions that we still need but we have already recently * requested from some other peer, we'll reinsert (process_time, txid) * back into the peer's m_tx_process_time at the point in the future at * which the most recent GETDATA request would time out (ie * GETDATA_TX_INTERVAL + the request time stored in g_already_asked_for). * We add an additional delay for inbound peers, again to prefer * attempting download from outbound peers first. * We also add an extra small random delay up to 2 seconds * to avoid biasing some peers over others. 
(e.g., due to fixed ordering * of peer processing in ThreadMessageHandler). * * When we receive a transaction from a peer, we remove the txid from the * peer's m_tx_in_flight set and from their recently announced set * (m_tx_announced). We also clear g_already_asked_for for that entry, so * that if somehow the transaction is not accepted but also not added to * the reject filter, then we will eventually redownload from other * peers. */ struct TxDownloadState { /** * Track when to attempt download of announced transactions (process * time in micros -> txid) */ std::multimap m_tx_process_time; //! Store all the transactions a peer has recently announced std::set m_tx_announced; //! Store transactions which were requested by us, with timestamp std::map m_tx_in_flight; //! Periodically check for stuck getdata requests std::chrono::microseconds m_check_expiry_timer{0}; }; TxDownloadState m_tx_download; struct AvalancheState { std::chrono::time_point last_poll; }; AvalancheState m_avalanche_state; //! Whether this peer is an inbound connection bool m_is_inbound; //! Whether this peer is a manual connection bool m_is_manual_connection; CNodeState(CAddress addrIn, bool is_inbound, bool is_manual) : address(addrIn), m_is_inbound(is_inbound), m_is_manual_connection(is_manual) { fCurrentlyConnected = false; pindexBestKnownBlock = nullptr; hashLastUnknownBlock = BlockHash(); pindexLastCommonBlock = nullptr; pindexBestHeaderSent = nullptr; nUnconnectingHeaders = 0; fSyncStarted = false; nHeadersSyncTimeout = 0; nStallingSince = 0; nDownloadingSince = 0; nBlocksInFlight = 0; nBlocksInFlightValidHeaders = 0; fPreferredDownload = false; fPreferHeaders = false; fPreferHeaderAndIDs = false; fProvidesHeaderAndIDs = false; fSupportsDesiredCmpctVersion = false; m_chain_sync = {0, nullptr, false, false}; m_last_block_announcement = 0; } }; // Keeps track of the time (in microseconds) when transactions were requested // last time limitedmap g_already_asked_for GUARDED_BY(cs_main)(MAX_INV_SZ); /** Map maintaining per-node state. */ static std::map mapNodeState GUARDED_BY(cs_main); static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::map::iterator it = mapNodeState.find(pnode); if (it == mapNodeState.end()) { return nullptr; } return &it->second; } /** * Data structure for an individual peer. This struct is not protected by * cs_main since it does not contain validation-critical data. * * Memory is owned by shared pointers and this object is destructed when * the refcount drops to zero. * * TODO: move most members from CNodeState to this structure. * TODO: move remaining application-layer data members from CNode to this * structure. */ struct Peer { /** Same id as the CNode object for this peer */ const NodeId m_id{0}; /** Protects misbehavior data members */ Mutex m_misbehavior_mutex; /** Accumulated misbehavior score for this peer */ int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0}; /** Whether this peer should be disconnected and marked as discouraged * (unless it has the noban permission). */ bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false}; Peer(NodeId id) : m_id(id) {} }; using PeerRef = std::shared_ptr; /** * Map of all Peer objects, keyed by peer id. This map is protected * by the global g_peer_mutex. Once a shared pointer reference is * taken, the lock may be released. Individual fields are protected by * their own locks. */ Mutex g_peer_mutex; static std::map g_peer_map GUARDED_BY(g_peer_mutex); /** * Get a shared pointer to the Peer object. 
* May return nullptr if the Peer object can't be found. */ static PeerRef GetPeerRef(NodeId id) { LOCK(g_peer_mutex); auto it = g_peer_map.find(id); return it != g_peer_map.end() ? it->second : nullptr; } static void UpdatePreferredDownload(const CNode &node, CNodeState *state) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { nPreferredDownload -= state->fPreferredDownload; // Whether this node should be marked as a preferred download node. state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) && !node.IsAddrFetchConn() && !node.fClient; nPreferredDownload += state->fPreferredDownload; } static void PushNodeVersion(const Config &config, CNode &pnode, CConnman &connman, int64_t nTime) { // Note that pnode.GetLocalServices() is a reflection of the local // services we were offering when the CNode object was created for this // peer. ServiceFlags nLocalNodeServices = pnode.GetLocalServices(); uint64_t nonce = pnode.GetLocalNonce(); int nNodeStartingHeight = pnode.GetMyStartingHeight(); NodeId nodeid = pnode.GetId(); CAddress addr = pnode.addr; uint64_t extraEntropy = pnode.GetLocalExtraEntropy(); CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices)); CAddress addrMe = CAddress(CService(), nLocalNodeServices); connman.PushMessage( &pnode, CNetMsgMaker(INIT_PROTO_VERSION) .Make(NetMsgType::VERSION, PROTOCOL_VERSION, uint64_t(nLocalNodeServices), nTime, addrYou, addrMe, nonce, userAgent(config), nNodeStartingHeight, ::g_relay_txes && pnode.m_tx_relay != nullptr, extraEntropy)); if (fLogIPs) { LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, " "peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid); } else { LogPrint( BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); } // Returns a bool indicating whether we requested this block. // Also used if a block was /not/ received and timed out or started with another // peer. static bool MarkBlockAsReceived(const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::map::iterator>>::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end()) { CNodeState *state = State(itInFlight->second.first); assert(state != nullptr); state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders; if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) { // Last validated block on the queue was received. nPeersWithValidatedDownloads--; } if (state->vBlocksInFlight.begin() == itInFlight->second.second) { // First block on the queue was received, update the start download // time for the next one state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros()); } state->vBlocksInFlight.erase(itInFlight->second.second); state->nBlocksInFlight--; state->nStallingSince = 0; mapBlocksInFlight.erase(itInFlight); return true; } return false; } // returns false, still setting pit, if the block was already in flight from the // same peer // pit will only be valid as long as the same cs_main lock is being held. 
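// Usage sketch for the Peer map above (hypothetical caller, not part of this
// diff): the shared_ptr is taken while g_peer_mutex is held inside
// GetPeerRef(), after which only the field-specific mutex is needed.
static void BumpMisbehaviorSketch(NodeId nodeid, int howmuch) {
    PeerRef peer = GetPeerRef(nodeid);
    if (peer == nullptr) {
        // The peer has already been removed from g_peer_map.
        return;
    }
    LOCK(peer->m_misbehavior_mutex);
    peer->m_misbehavior_score += howmuch;
}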
static bool MarkBlockAsInFlight(const Config &config, CTxMemPool &mempool, NodeId nodeid, const BlockHash &hash, const Consensus::Params &consensusParams, const CBlockIndex *pindex = nullptr, std::list::iterator **pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState *state = State(nodeid); assert(state != nullptr); // Short-circuit most stuff in case it is from the same node. std::map::iterator>>::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) { if (pit) { *pit = &itInFlight->second.second; } return false; } // Make sure it's not listed somewhere already. MarkBlockAsReceived(hash); std::list::iterator it = state->vBlocksInFlight.insert( state->vBlocksInFlight.end(), {hash, pindex, pindex != nullptr, std::unique_ptr( pit ? new PartiallyDownloadedBlock(config, &mempool) : nullptr)}); state->nBlocksInFlight++; state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; if (state->nBlocksInFlight == 1) { // We're starting a block download (batch) from this peer. state->nDownloadingSince = GetTimeMicros(); } if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) { nPeersWithValidatedDownloads++; } itInFlight = mapBlocksInFlight .insert(std::make_pair(hash, std::make_pair(nodeid, it))) .first; if (pit) { *pit = &itInFlight->second.second; } return true; } /** Check whether the last unknown block a peer advertised is not yet known. */ static void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState *state = State(nodeid); assert(state != nullptr); if (!state->hashLastUnknownBlock.IsNull()) { const CBlockIndex *pindex = LookupBlockIndex(state->hashLastUnknownBlock); if (pindex && pindex->nChainWork > 0) { if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = pindex; } state->hashLastUnknownBlock.SetNull(); } } } /** Update tracking information about which blocks a peer is assumed to have. */ static void UpdateBlockAvailability(NodeId nodeid, const BlockHash &hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState *state = State(nodeid); assert(state != nullptr); ProcessBlockAvailability(nodeid); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex && pindex->nChainWork > 0) { // An actually better block was announced. if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = pindex; } } else { // An unknown block was announced; just assume that the latest one is // the best one. state->hashLastUnknownBlock = hash; } } /** * When a peer sends us a valid block, instruct it to announce blocks to us * using CMPCTBLOCK if possible by adding its nodeid to the end of * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by * removing the first element if necessary. 
*/ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman &connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); CNodeState *nodestate = State(nodeid); if (!nodestate) { LogPrint(BCLog::NET, "node state unavailable: peer=%d\n", nodeid); return; } if (!nodestate->fProvidesHeaderAndIDs) { return; } for (std::list::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { if (*it == nodeid) { lNodesAnnouncingHeaderAndIDs.erase(it); lNodesAnnouncingHeaderAndIDs.push_back(nodeid); return; } } connman.ForNode(nodeid, [&connman](CNode *pfrom) { AssertLockHeld(cs_main); uint64_t nCMPCTBLOCKVersion = 1; if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { // As per BIP152, we only get 3 of our peers to announce // blocks using compact encodings. connman.ForNode( lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode *pnodeStop) { AssertLockHeld(cs_main); connman.PushMessage( pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()) .Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion)); return true; }); lNodesAnnouncingHeaderAndIDs.pop_front(); } connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()) .Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion)); lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); return true; }); } static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); if (g_last_tip_update == 0) { g_last_tip_update = GetTime(); } return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty(); } static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return ::ChainActive().Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20; } static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight)) { return true; } if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight)) { return true; } return false; } /** * Update pindexLastCommonBlock and add not-in-flight missing successors to * vBlocks, until it has at most count entries. */ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector &vBlocks, NodeId &nodeStaller, const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { if (count == 0) { return; } vBlocks.reserve(vBlocks.size() + count); CNodeState *state = State(nodeid); assert(state != nullptr); // Make sure pindexBestKnownBlock is up to date, we'll need it. ProcessBlockAvailability(nodeid); if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < ::ChainActive().Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) { // This peer has nothing interesting. return; } if (state->pindexLastCommonBlock == nullptr) { // Bootstrap quickly by guessing a parent of our best tip is the forking // point. Guessing wrong in either direction is not a problem. state->pindexLastCommonBlock = ::ChainActive()[std::min( state->pindexBestKnownBlock->nHeight, ::ChainActive().Height())]; } // If the peer reorganized, our previous pindexLastCommonBlock may not be an // ancestor of its current tip anymore. Go back enough to fix that. 
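// Minimal standalone sketch of the rotation performed by
// MaybeSetPeerAsAnnouncingHeaderAndIDs above: keep an ordered list of node
// ids that announce with compact blocks, capped at three as per BIP152;
// promoting an id that is already present just moves it to the back, while
// adding a new id evicts the oldest one, which would be told to stop
// announcing via SENDCMPCT(announce=false). NodeId is a stand-in alias here.
#include <cstdint>
#include <list>

namespace cmpct_announcers_sketch {
using NodeId = int64_t;
constexpr std::size_t MAX_CMPCT_ANNOUNCERS = 3;

// Returns the id of an evicted peer, or -1 if nobody had to be demoted; the
// newly promoted peer would then be sent SENDCMPCT(announce=true).
NodeId Promote(std::list<NodeId> &announcers, NodeId nodeid) {
    for (auto it = announcers.begin(); it != announcers.end(); ++it) {
        if (*it == nodeid) {
            announcers.erase(it);
            announcers.push_back(nodeid); // already announcing; refresh order
            return -1;
        }
    }
    NodeId evicted = -1;
    if (announcers.size() >= MAX_CMPCT_ANNOUNCERS) {
        evicted = announcers.front();
        announcers.pop_front();
    }
    announcers.push_back(nodeid);
    return evicted;
}
} // namespace cmpct_announcers_sketch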
state->pindexLastCommonBlock = LastCommonAncestor( state->pindexLastCommonBlock, state->pindexBestKnownBlock); if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) { return; } std::vector vToFetch; const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the best block we know the peer has, or more // than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last linked block we have in // common with this peer. The +1 is so we can detect stalling, namely if we // would be able to download that next block if the window were 1 larger. int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW; int nMaxHeight = std::min(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1); NodeId waitingfor = -1; while (pindexWalk->nHeight < nMaxHeight) { // Read up to 128 (or more, if more blocks than that are needed) // successors of pindexWalk (towards pindexBestKnownBlock) into // vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as // expensive as iterating over ~100 CBlockIndex* entries anyway. int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max(count - vBlocks.size(), 128)); vToFetch.resize(nToFetch); pindexWalk = state->pindexBestKnownBlock->GetAncestor( pindexWalk->nHeight + nToFetch); vToFetch[nToFetch - 1] = pindexWalk; for (unsigned int i = nToFetch - 1; i > 0; i--) { vToFetch[i - 1] = vToFetch[i]->pprev; } // Iterate over those blocks in vToFetch (in forward direction), adding // the ones that are not yet downloaded and not in flight to vBlocks. In // the meantime, update pindexLastCommonBlock as long as all ancestors // are already downloaded, or if it's already part of our chain (and // therefore don't need it even if pruned). for (const CBlockIndex *pindex : vToFetch) { if (!pindex->IsValid(BlockValidity::TREE)) { // We consider the chain that this peer is on invalid. return; } if (pindex->nStatus.hasData() || ::ChainActive().Contains(pindex)) { if (pindex->HaveTxsDownloaded()) { state->pindexLastCommonBlock = pindex; } } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) { // The block is not already downloaded, and not yet in flight. if (pindex->nHeight > nWindowEnd) { // We reached the end of the window. if (vBlocks.size() == 0 && waitingfor != nodeid) { // We aren't able to fetch anything, but we would be if // the download window was one larger. nodeStaller = waitingfor; } return; } vBlocks.push_back(pindex); if (vBlocks.size() == count) { return; } } else if (waitingfor == -1) { // This is the first already-in-flight block. 
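// Minimal standalone sketch of the moving download window walked by
// FindNextBlocksToDownload above, using plain heights instead of CBlockIndex
// pointers: request up to `count` missing heights, never further than
// BLOCK_DOWNLOAD_WINDOW past the last block we share with the peer; if the
// window is exhausted and nothing could be requested because a block is still
// in flight elsewhere, that other peer is reported as the potential staller
// (the real code additionally excludes the peer being asked).
#include <algorithm>
#include <cstdint>
#include <map>
#include <vector>

namespace download_window_sketch {
using NodeId = int64_t;
constexpr int BLOCK_DOWNLOAD_WINDOW = 1024; // illustrative window size

std::vector<int> FindNextHeights(int last_common, int peer_best,
                                 unsigned int count,
                                 const std::map<int, NodeId> &in_flight,
                                 NodeId &staller) {
    std::vector<int> result;
    const int window_end = last_common + BLOCK_DOWNLOAD_WINDOW;
    const int max_height = std::min(peer_best, window_end + 1);
    NodeId waiting_for = -1;
    for (int h = last_common + 1; h <= max_height; ++h) {
        auto it = in_flight.find(h);
        if (it == in_flight.end()) {
            if (h > window_end) {
                // We could only fetch this block if the window were larger.
                if (result.empty() && waiting_for != -1) {
                    staller = waiting_for;
                }
                return result;
            }
            result.push_back(h);
            if (result.size() == count) {
                return result;
            }
        } else if (waiting_for == -1) {
            waiting_for = it->second; // first already-in-flight block
        }
    }
    return result;
}
} // namespace download_window_sketch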
waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first; } } } } void EraseTxRequest(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { g_already_asked_for.erase(txid); } std::chrono::microseconds GetTxRequestTime(const TxId &txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { auto it = g_already_asked_for.find(txid); if (it != g_already_asked_for.end()) { return it->second; } return {}; } void UpdateTxRequestTime(const TxId &txid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { auto it = g_already_asked_for.find(txid); if (it == g_already_asked_for.end()) { g_already_asked_for.insert(std::make_pair(txid, request_time)); } else { g_already_asked_for.update(it, request_time); } } std::chrono::microseconds CalculateTxGetDataTime(const TxId &txid, std::chrono::microseconds current_time, bool use_inbound_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::chrono::microseconds process_time; const auto last_request_time = GetTxRequestTime(txid); // First time requesting this tx if (last_request_time.count() == 0) { process_time = current_time; } else { // Randomize the delay to avoid biasing some peers over others (such as // due to fixed ordering of peer processing in ThreadMessageHandler) process_time = last_request_time + GETDATA_TX_INTERVAL + GetRandMicros(MAX_GETDATA_RANDOM_DELAY); } // We delay processing announcements from inbound peers if (use_inbound_delay) { process_time += INBOUND_PEER_TX_DELAY; } return process_time; } void RequestTx(CNodeState *state, const TxId &txid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState::TxDownloadState &peer_download_state = state->m_tx_download; if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS || peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS || peer_download_state.m_tx_announced.count(txid)) { // Too many queued announcements from this peer, or we already have // this announcement return; } peer_download_state.m_tx_announced.insert(txid); // Calculate the time to try requesting this transaction. Use // fPreferredDownload as a proxy for outbound peers. 
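// Minimal standalone sketch of the scheduling rule in CalculateTxGetDataTime
// above: the first announcement of a txid is processed immediately, repeat
// announcements wait until at least one request interval past the previous
// request plus a small random delay, and inbound peers pay an extra fixed
// delay so outbound peers are asked first. Constants and the rng are
// illustrative stand-ins, not the values defined elsewhere in this file.
#include <chrono>
#include <cstdint>
#include <random>

namespace getdata_delay_sketch {
using namespace std::chrono_literals;
constexpr std::chrono::microseconds TX_REQUEST_INTERVAL{60s};
constexpr std::chrono::microseconds MAX_RANDOM_DELAY{2s};
constexpr std::chrono::microseconds INBOUND_DELAY{2s};

std::chrono::microseconds
NextProcessTime(std::chrono::microseconds now,
                std::chrono::microseconds last_request, bool inbound,
                std::mt19937_64 &rng) {
    std::chrono::microseconds process_time;
    if (last_request.count() == 0) {
        process_time = now; // never requested before
    } else {
        std::uniform_int_distribution<int64_t> jitter(0,
                                                      MAX_RANDOM_DELAY.count());
        process_time = last_request + TX_REQUEST_INTERVAL +
                       std::chrono::microseconds{jitter(rng)};
    }
    if (inbound) {
        process_time += INBOUND_DELAY; // prefer hearing from outbound peers
    }
    return process_time;
}
} // namespace getdata_delay_sketch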
const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload); peer_download_state.m_tx_process_time.emplace(process_time, txid); } } // namespace // This function is used for testing the stale tip eviction logic, see // denialofservice_tests.cpp void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) { LOCK(cs_main); CNodeState *state = State(node); if (state) { state->m_last_block_announcement = time_in_seconds; } } void PeerManager::InitializeNode(const Config &config, CNode *pnode) { CAddress addr = pnode->addr; std::string addrName = pnode->GetAddrName(); NodeId nodeid = pnode->GetId(); { LOCK(cs_main); mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, pnode->IsInboundConn(), pnode->IsManualConn())); } { PeerRef peer = std::make_shared(nodeid); LOCK(g_peer_mutex); g_peer_map.emplace_hint(g_peer_map.end(), nodeid, std::move(peer)); } if (!pnode->IsInboundConn()) { PushNodeVersion(config, *pnode, m_connman, GetTime()); } } void PeerManager::ReattemptInitialBroadcast(CScheduler &scheduler) const { std::set unbroadcast_txids = m_mempool.GetUnbroadcastTxs(); for (const TxId &txid : unbroadcast_txids) { // Sanity check: all unbroadcast txns should exist in the mempool if (m_mempool.exists(txid)) { RelayTransaction(txid, m_connman); } else { m_mempool.RemoveUnbroadcastTx(txid, true); } } // Schedule next run for 10-15 minutes in the future. // We add randomness on every cycle to avoid the possibility of P2P // fingerprinting. const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5}); scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); } void PeerManager::FinalizeNode(const Config &config, NodeId nodeid, bool &fUpdateConnectionTime) { fUpdateConnectionTime = false; LOCK(cs_main); int misbehavior{0}; { PeerRef peer = GetPeerRef(nodeid); assert(peer != nullptr); misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score); LOCK(g_peer_mutex); g_peer_map.erase(nodeid); } CNodeState *state = State(nodeid); assert(state != nullptr); if (state->fSyncStarted) { nSyncStarted--; } if (misbehavior == 0 && state->fCurrentlyConnected) { fUpdateConnectionTime = true; } for (const QueuedBlock &entry : state->vBlocksInFlight) { mapBlocksInFlight.erase(entry.hash); } EraseOrphansFor(nodeid); nPreferredDownload -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; assert(g_outbound_peers_with_protect_from_disconnect >= 0); mapNodeState.erase(nodeid); if (mapNodeState.empty()) { // Do a consistency check after the last peer is removed. assert(mapBlocksInFlight.empty()); assert(nPreferredDownload == 0); assert(nPeersWithValidatedDownloads == 0); assert(g_outbound_peers_with_protect_from_disconnect == 0); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); } bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) { { LOCK(cs_main); CNodeState *state = State(nodeid); if (state == nullptr) { return false; } stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1; stats.nCommonHeight = state->pindexLastCommonBlock ? 
state->pindexLastCommonBlock->nHeight : -1; for (const QueuedBlock &queue : state->vBlocksInFlight) { if (queue.pindex) { stats.vHeightInFlight.push_back(queue.pindex->nHeight); } } } PeerRef peer = GetPeerRef(nodeid); if (peer == nullptr) { return false; } stats.m_misbehavior_score = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score); return true; } ////////////////////////////////////////////////////////////////////////////// // // mapOrphanTransactions // static void AddToCompactExtraTransactions(const CTransactionRef &tx) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN); if (max_extra_txn <= 0) { return; } if (!vExtraTxnForCompact.size()) { vExtraTxnForCompact.resize(max_extra_txn); } vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetHash(), tx); vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn; } bool AddOrphanTx(const CTransactionRef &tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { const TxId &txid = tx->GetId(); if (mapOrphanTransactions.count(txid)) { return false; } // Ignore big transactions, to avoid a send-big-orphans memory exhaustion // attack. If a peer has a legitimate large transaction with a missing // parent then we assume it will rebroadcast it later, after the parent // transaction(s) have been mined or received. // 100 orphans, each of which is at most 100,000 bytes big is at most 10 // megabytes of orphans and somewhat more byprev index (in the worst case): unsigned int sz = tx->GetTotalSize(); if (sz > MAX_STANDARD_TX_SIZE) { LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, txid.ToString()); return false; } auto ret = mapOrphanTransactions.emplace( txid, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME, g_orphan_list.size()}); assert(ret.second); g_orphan_list.push_back(ret.first); for (const CTxIn &txin : tx->vin) { mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first); } AddToCompactExtraTransactions(tx); LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", txid.ToString(), mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size()); return true; } static int EraseOrphanTx(const TxId id) EXCLUSIVE_LOCKS_REQUIRED(g_cs_orphans) { const auto it = mapOrphanTransactions.find(id); if (it == mapOrphanTransactions.end()) { return 0; } for (const CTxIn &txin : it->second.tx->vin) { const auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout); if (itPrev == mapOrphanTransactionsByPrev.end()) { continue; } itPrev->second.erase(it); if (itPrev->second.empty()) { mapOrphanTransactionsByPrev.erase(itPrev); } } size_t old_pos = it->second.list_pos; assert(g_orphan_list[old_pos] == it); if (old_pos + 1 != g_orphan_list.size()) { // Unless we're deleting the last entry in g_orphan_list, move the last // entry to the position we're deleting. auto it_last = g_orphan_list.back(); g_orphan_list[old_pos] = it_last; it_last->second.list_pos = old_pos; } g_orphan_list.pop_back(); mapOrphanTransactions.erase(it); return 1; } void EraseOrphansFor(NodeId peer) { LOCK(g_cs_orphans); int nErased = 0; auto iter = mapOrphanTransactions.begin(); while (iter != mapOrphanTransactions.end()) { // Increment to avoid iterator becoming invalid. 
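// Minimal standalone sketch of the erase-while-iterating idiom used in this
// loop and again in LimitOrphanTxSize below: copy the iterator, advance the
// loop iterator first, and only then erase through the copy, so the erase
// never invalidates the iterator the loop keeps using. The map and predicate
// here are placeholders.
#include <map>
#include <string>

namespace erase_while_iterating_sketch {
int EraseMatching(std::map<int, std::string> &m, const std::string &value) {
    int erased = 0;
    auto iter = m.begin();
    while (iter != m.end()) {
        const auto maybe_erase = iter++; // advance before a possible erase
        if (maybe_erase->second == value) {
            m.erase(maybe_erase);
            ++erased;
        }
    }
    return erased;
}
} // namespace erase_while_iterating_sketch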
const auto maybeErase = iter++; if (maybeErase->second.fromPeer == peer) { nErased += EraseOrphanTx(maybeErase->second.tx->GetId()); } } if (nErased > 0) { LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer); } } unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) { LOCK(g_cs_orphans); unsigned int nEvicted = 0; static int64_t nNextSweep; int64_t nNow = GetTime(); if (nNextSweep <= nNow) { // Sweep out expired orphan pool entries: int nErased = 0; int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL; auto iter = mapOrphanTransactions.begin(); while (iter != mapOrphanTransactions.end()) { const auto maybeErase = iter++; if (maybeErase->second.nTimeExpire <= nNow) { nErased += EraseOrphanTx(maybeErase->second.tx->GetId()); } else { nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime); } } // Sweep again 5 minutes after the next entry that expires in order to // batch the linear scan. nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL; if (nErased > 0) { LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased); } } FastRandomContext rng; while (mapOrphanTransactions.size() > nMaxOrphans) { // Evict a random orphan: size_t randompos = rng.randrange(g_orphan_list.size()); EraseOrphanTx(g_orphan_list[randompos]->first); ++nEvicted; } return nEvicted; } void PeerManager::Misbehaving(const NodeId pnode, const int howmuch, const std::string &message) { assert(howmuch > 0); PeerRef peer = GetPeerRef(pnode); if (peer == nullptr) { return; } LOCK(peer->m_misbehavior_mutex); peer->m_misbehavior_score += howmuch; const std::string message_prefixed = message.empty() ? "" : (": " + message); if (peer->m_misbehavior_score >= DISCOURAGEMENT_THRESHOLD && peer->m_misbehavior_score - howmuch < DISCOURAGEMENT_THRESHOLD) { LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed); peer->m_should_discourage = true; } else { LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s\n", pnode, peer->m_misbehavior_score - howmuch, peer->m_misbehavior_score, message_prefixed); } } /** * Returns true if the given validation state result may result in a peer * banning/disconnecting us. We use this to determine which unaccepted * transactions from a whitelisted peer that we can safely relay. */ static bool TxRelayMayResultInDisconnect(const TxValidationState &state) { return state.GetResult() == TxValidationResult::TX_CONSENSUS; } bool PeerManager::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState &state, bool via_compact_block, const std::string &message) { switch (state.GetResult()) { case BlockValidationResult::BLOCK_RESULT_UNSET: break; // The node is providing invalid data: case BlockValidationResult::BLOCK_CONSENSUS: case BlockValidationResult::BLOCK_MUTATED: if (!via_compact_block) { Misbehaving(nodeid, 100, message); return true; } break; case BlockValidationResult::BLOCK_CACHED_INVALID: { LOCK(cs_main); CNodeState *node_state = State(nodeid); if (node_state == nullptr) { break; } // Ban outbound (but not inbound) peers if on an invalid chain. // Exempt HB compact block peers and manual connections. 
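// Minimal standalone sketch of the threshold crossing check in
// PeerManager::Misbehaving above: the peer is flagged for discouragement only
// on the call that moves its accumulated score from below to at-or-above the
// threshold, so offences after the threshold has been crossed do not
// re-trigger the flag. The threshold value and struct are illustrative
// stand-ins.
namespace misbehaving_sketch {
constexpr int DISCOURAGEMENT_THRESHOLD_SKETCH = 100;

struct PeerScore {
    int misbehavior_score = 0;
    bool should_discourage = false;
};

void Misbehave(PeerScore &peer, int howmuch) {
    const int before = peer.misbehavior_score;
    peer.misbehavior_score += howmuch;
    if (peer.misbehavior_score >= DISCOURAGEMENT_THRESHOLD_SKETCH &&
        before < DISCOURAGEMENT_THRESHOLD_SKETCH) {
        peer.should_discourage = true; // crossed the threshold just now
    }
}
} // namespace misbehaving_sketch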
if (!via_compact_block && !node_state->m_is_inbound && !node_state->m_is_manual_connection) { Misbehaving(nodeid, 100, message); return true; } break; } case BlockValidationResult::BLOCK_INVALID_HEADER: case BlockValidationResult::BLOCK_CHECKPOINT: case BlockValidationResult::BLOCK_INVALID_PREV: Misbehaving(nodeid, 100, message); return true; case BlockValidationResult::BLOCK_FINALIZATION: // TODO: Use the state object to report this is probably not the // best idea. This is effectively unreachable, unless there is a bug // somewhere. Misbehaving(nodeid, 20, message); return true; // Conflicting (but not necessarily invalid) data or different policy: case BlockValidationResult::BLOCK_MISSING_PREV: // TODO: Handle this much more gracefully (10 DoS points is super // arbitrary) Misbehaving(nodeid, 10, message); return true; case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE: case BlockValidationResult::BLOCK_TIME_FUTURE: break; } if (message != "") { LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message); } return false; } bool PeerManager::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState &state, const std::string &message) { switch (state.GetResult()) { case TxValidationResult::TX_RESULT_UNSET: break; // The node is providing invalid data: case TxValidationResult::TX_CONSENSUS: Misbehaving(nodeid, 100, message); return true; // Conflicting (but not necessarily invalid) data or different policy: case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE: case TxValidationResult::TX_NOT_STANDARD: case TxValidationResult::TX_MISSING_INPUTS: case TxValidationResult::TX_PREMATURE_SPEND: case TxValidationResult::TX_CONFLICT: case TxValidationResult::TX_MEMPOOL_POLICY: break; } if (message != "") { LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message); } return false; } ////////////////////////////////////////////////////////////////////////////// // // blockchain -> download logic notification // // To prevent fingerprinting attacks, only send blocks/headers outside of the // active chain if they are no more than a month older (both in time, and in // best equivalent proof of work) than the best header chain we know about and // we fully-validated them at some point. static bool BlockRequestAllowed(const CBlockIndex *pindex, const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); if (::ChainActive().Contains(pindex)) { return true; } return pindex->IsValid(BlockValidity::SCRIPTS) && (pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() < STALE_RELAY_AGE_LIMIT) && (GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, consensusParams) < STALE_RELAY_AGE_LIMIT); } PeerManager::PeerManager(const CChainParams &chainparams, CConnman &connman, BanMan *banman, CScheduler &scheduler, ChainstateManager &chainman, CTxMemPool &pool) : m_chainparams(chainparams), m_connman(connman), m_banman(banman), m_chainman(chainman), m_mempool(pool), m_stale_tip_check_time(0) { // Initialize global variables that cannot be constructed at startup. recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); // Blocks don't typically have more than 4000 transactions, so this should // be at least six blocks (~1 hr) worth of transactions that we can store. // If the number of transactions appearing in a block goes up, or if we are // seeing getdata requests more than an hour after initial announcement, we // can increase this number. 
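// Minimal standalone sketch of why *rolling* filters are used for
// recentRejects and g_recent_confirmed_transactions sized here and just
// below: memory stays bounded because old entries age out in generations
// rather than the filter saturating over time. This is a toy two-generation
// exact set for illustration only, not the probabilistic
// CRollingBloomFilter implementation.
#include <cstddef>
#include <string>
#include <unordered_set>
#include <utility>

namespace rolling_filter_sketch {
class RollingSet {
public:
    explicit RollingSet(std::size_t per_generation)
        : m_limit(per_generation) {}

    void insert(const std::string &key) {
        if (m_current.size() >= m_limit) {
            // Age out the older generation and start a fresh one.
            m_previous = std::move(m_current);
            m_current.clear();
        }
        m_current.insert(key);
    }

    bool contains(const std::string &key) const {
        return m_current.count(key) > 0 || m_previous.count(key) > 0;
    }

    void reset() {
        m_current.clear();
        m_previous.clear();
    }

private:
    std::size_t m_limit;
    std::unordered_set<std::string> m_current;
    std::unordered_set<std::string> m_previous;
};
} // namespace rolling_filter_sketch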
// The false positive rate of 1/1M should come out to less than 1 // transaction per day that would be inadvertently ignored (which is the // same probability that we have in the reject filter). g_recent_confirmed_transactions.reset( new CRollingBloomFilter(24000, 0.000001)); const Consensus::Params &consensusParams = chainparams.GetConsensus(); // Stale tip checking and peer eviction are on two different timers, but we // don't want them to get out of sync due to drift in the scheduler, so we // combine them in one function and schedule at the quicker (peer-eviction) // timer. static_assert( EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer"); scheduler.scheduleEvery( [this, &consensusParams]() { this->CheckForStaleTipAndEvictPeers(consensusParams); return true; }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL}); // schedule next run for 10-15 minutes in the future const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5}); scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta); } /** * Evict orphan txn pool entries (EraseOrphanTx) based on a newly connected * block. Also save the time of the last tip update. */ void PeerManager::BlockConnected(const std::shared_ptr &pblock, const CBlockIndex *pindex) { { LOCK(g_cs_orphans); std::vector vOrphanErase; for (const CTransactionRef &ptx : pblock->vtx) { const CTransaction &tx = *ptx; // Which orphan pool entries must we evict? for (const auto &txin : tx.vin) { auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout); if (itByPrev == mapOrphanTransactionsByPrev.end()) { continue; } for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) { const CTransaction &orphanTx = *(*mi)->second.tx; const TxId &orphanId = orphanTx.GetId(); vOrphanErase.push_back(orphanId); } } } // Erase orphan transactions included or precluded by this block if (vOrphanErase.size()) { int nErased = 0; for (const auto &orphanId : vOrphanErase) { nErased += EraseOrphanTx(orphanId); } LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); } g_last_tip_update = GetTime(); } { LOCK(g_cs_recent_confirmed_transactions); for (const CTransactionRef &ptx : pblock->vtx) { g_recent_confirmed_transactions->insert(ptx->GetId()); } } } void PeerManager::BlockDisconnected(const std::shared_ptr &block, const CBlockIndex *pindex) { // To avoid relay problems with transactions that were previously // confirmed, clear our filter of recently confirmed transactions whenever // there's a reorg. // This means that in a 1-block reorg (where 1 block is disconnected and // then another block reconnected), our filter will drop to having only one // block's worth of transactions in it, but that should be fine, since // presumably the most common case of relaying a confirmed transaction // should be just after a new block containing it is found. LOCK(g_cs_recent_confirmed_transactions); g_recent_confirmed_transactions->reset(); } // All of the following cache a recent block, and are protected by // cs_most_recent_block static RecursiveMutex cs_most_recent_block; static std::shared_ptr most_recent_block GUARDED_BY(cs_most_recent_block); static std::shared_ptr most_recent_compact_block GUARDED_BY(cs_most_recent_block); static uint256 most_recent_block_hash GUARDED_BY(cs_most_recent_block); /** * Maintain state about the best-seen block and fast-announce a compact block * to compatible peers. 
*/ void PeerManager::NewPoWValidBlock( const CBlockIndex *pindex, const std::shared_ptr &pblock) { std::shared_ptr pcmpctblock = std::make_shared(*pblock); const CNetMsgMaker msgMaker(PROTOCOL_VERSION); LOCK(cs_main); static int nHighestFastAnnounce = 0; if (pindex->nHeight <= nHighestFastAnnounce) { return; } nHighestFastAnnounce = pindex->nHeight; uint256 hashBlock(pblock->GetHash()); { LOCK(cs_most_recent_block); most_recent_block_hash = hashBlock; most_recent_block = pblock; most_recent_compact_block = pcmpctblock; } m_connman.ForEachNode([this, &pcmpctblock, pindex, &msgMaker, &hashBlock](CNode *pnode) { AssertLockHeld(cs_main); // TODO: Avoid the repeated-serialization here if (pnode->GetCommonVersion() < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect) { return; } ProcessBlockAvailability(pnode->GetId()); CNodeState &state = *State(pnode->GetId()); // If the peer has, or we announced to them the previous block already, // but we don't think they have this one, go ahead and announce it. if (state.fPreferHeaderAndIDs && !PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) { LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerManager::NewPoWValidBlock", hashBlock.ToString(), pnode->GetId()); m_connman.PushMessage( pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock)); state.pindexBestHeaderSent = pindex; } }); } /** * Update our best height and announce any block hashes which weren't previously * in ::ChainActive() to our peers. */ void PeerManager::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) { const int nNewHeight = pindexNew->nHeight; m_connman.SetBestHeight(nNewHeight); SetServiceFlagsIBDCache(!fInitialDownload); if (!fInitialDownload) { // Find the hashes of all blocks that weren't previously in the best // chain. std::vector vHashes; const CBlockIndex *pindexToAnnounce = pindexNew; while (pindexToAnnounce != pindexFork) { vHashes.push_back(pindexToAnnounce->GetBlockHash()); pindexToAnnounce = pindexToAnnounce->pprev; if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) { // Limit announcements in case of a huge reorganization. Rely on // the peer's synchronization mechanism in that case. break; } } // Relay inventory, but don't relay old inventory during initial block // download. m_connman.ForEachNode([nNewHeight, &vHashes](CNode *pnode) { if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) { for (const BlockHash &hash : reverse_iterate(vHashes)) { pnode->PushBlockHash(hash); } } }); m_connman.WakeMessageHandler(); } } /** * Handle invalid block rejection and consequent peer banning, maintain which * peers announce compact blocks. */ void PeerManager::BlockChecked(const CBlock &block, const BlockValidationState &state) { LOCK(cs_main); const BlockHash hash = block.GetHash(); std::map>::iterator it = mapBlockSource.find(hash); // If the block failed validation, we know where it came from and we're // still connected to that peer, maybe punish. if (state.IsInvalid() && it != mapBlockSource.end() && State(it->second.first)) { MaybePunishNodeForBlock(/*nodeid=*/it->second.first, state, /*via_compact_block=*/!it->second.second); } // Check that: // 1. The block is valid // 2. We're not in initial block download // 3. This is currently the best block we're aware of. We haven't updated // the tip yet so we have no way to check this directly here. Instead we // just check that there are currently no other blocks in flight. 
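// Minimal standalone sketch of the fast-announce decision in
// NewPoWValidBlock above: a compact block is pushed only to peers that asked
// for header-and-ids announcements, do not appear to have this block yet, but
// do have its parent (otherwise the compact block would not connect for
// them), and a block is only ever fast-announced when its height exceeds
// everything announced this way before. The peer snapshot type is a
// simplified stand-in.
namespace fast_announce_sketch {
struct PeerView {
    bool prefers_header_and_ids = false;
    bool has_block = false;  // roughly PeerHasHeader(state, pindex)
    bool has_parent = false; // roughly PeerHasHeader(state, pindex->pprev)
};

bool ShouldFastAnnounceToPeer(const PeerView &peer) {
    return peer.prefers_header_and_ids && !peer.has_block && peer.has_parent;
}

// Returns true at most once per height, mirroring nHighestFastAnnounce.
bool ShouldFastAnnounceAtHeight(int block_height, int &highest_fast_announce) {
    if (block_height <= highest_fast_announce) {
        return false;
    }
    highest_fast_announce = block_height;
    return true;
}
} // namespace fast_announce_sketch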
else if (state.IsValid() && !::ChainstateActive().IsInitialBlockDownload() && mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { if (it != mapBlockSource.end()) { MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman); } } if (it != mapBlockSource.end()) { mapBlockSource.erase(it); } } ////////////////////////////////////////////////////////////////////////////// // // Messages // static bool AlreadyHave(const CInv &inv, const CTxMemPool &mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { switch (inv.type) { case MSG_TX: { assert(recentRejects); if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) { // If the chain tip has changed previously rejected transactions // might be now valid, e.g. due to a nLockTime'd tx becoming // valid, or a double-spend. Reset the rejects filter and give // those txs a second chance. hashRecentRejectsChainTip = ::ChainActive().Tip()->GetBlockHash(); recentRejects->reset(); } const TxId txid(inv.hash); { LOCK(g_cs_orphans); if (mapOrphanTransactions.count(txid)) { return true; } } { LOCK(g_cs_recent_confirmed_transactions); if (g_recent_confirmed_transactions->contains(txid)) { return true; } } return recentRejects->contains(txid) || mempool.exists(txid); } case MSG_BLOCK: return LookupBlockIndex(BlockHash(inv.hash)) != nullptr; } // Don't know what it is, just say we already got one return true; } void RelayTransaction(const TxId &txid, const CConnman &connman) { connman.ForEachNode( [&txid](CNode *pnode) { pnode->PushTxInventory(txid); }); } static void RelayAddress(const CAddress &addr, bool fReachable, const CConnman &connman) { // Limited relaying of addresses outside our network(s) unsigned int nRelayNodes = fReachable ? 2 : 1; // Relay to a limited number of other nodes. // Use deterministic randomness to send to the same nodes for 24 hours at a // time so the m_addr_knowns of the chosen nodes prevent repeats uint64_t hashAddr = addr.GetHash(); const CSipHasher hasher = connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY) .Write(hashAddr << 32) .Write((GetTime() + hashAddr) / (24 * 60 * 60)); FastRandomContext insecure_rand; std::array, 2> best{ {{0, nullptr}, {0, nullptr}}}; assert(nRelayNodes <= best.size()); auto sortfunc = [&best, &hasher, nRelayNodes](CNode *pnode) { if (pnode->IsAddrRelayPeer()) { uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize(); for (unsigned int i = 0; i < nRelayNodes; i++) { if (hashKey > best[i].first) { std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1); best[i] = std::make_pair(hashKey, pnode); break; } } } }; auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] { for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) { best[i].second->PushAddress(addr, insecure_rand); } }; connman.ForEachNodeThen(std::move(sortfunc), std::move(pushfunc)); } static void ProcessGetBlockData(const Config &config, CNode &pfrom, const CInv &inv, CConnman &connman, const std::atomic &interruptMsgProc) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); const BlockHash hash(inv.hash); bool send = false; std::shared_ptr a_recent_block; std::shared_ptr a_recent_compact_block; { LOCK(cs_most_recent_block); a_recent_block = most_recent_block; a_recent_compact_block = most_recent_compact_block; } bool need_activate_chain = false; { LOCK(cs_main); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex) { if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BlockValidity::SCRIPTS) && 
pindex->IsValid(BlockValidity::TREE)) { // If we have the block and all of its parents, but have not yet // validated it, we might be in the middle of connecting it (ie // in the unlock of cs_main before ActivateBestChain but after // AcceptBlock). In this case, we need to run ActivateBestChain // prior to checking the relay conditions below. need_activate_chain = true; } } } // release cs_main before calling ActivateBestChain if (need_activate_chain) { BlockValidationState state; if (!ActivateBestChain(config, state, a_recent_block)) { LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); } } LOCK(cs_main); const CBlockIndex *pindex = LookupBlockIndex(hash); if (pindex) { send = BlockRequestAllowed(pindex, consensusParams); if (!send) { LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old " "block that isn't in the main chain\n", __func__, pfrom.GetId()); } } const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); // Disconnect node in case we have reached the outbound limit for serving // historical blocks. // Never disconnect whitelisted nodes. if (send && connman.OutboundTargetReached(true) && (((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - pindex->GetBlockTime() > HISTORICAL_BLOCK_AGE)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom.HasPermission(PF_NOBAN)) { LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom.GetId()); // disconnect node pfrom.fDisconnect = true; send = false; } // Avoid leaking prune-height by never sending blocks below the // NODE_NETWORK_LIMITED threshold. // Add two blocks buffer extension for possible races if (send && !pfrom.HasPermission(PF_NOBAN) && ((((pfrom.GetLocalServices() & NODE_NETWORK_LIMITED) == NODE_NETWORK_LIMITED) && ((pfrom.GetLocalServices() & NODE_NETWORK) != NODE_NETWORK) && (::ChainActive().Tip()->nHeight - pindex->nHeight > (int)NODE_NETWORK_LIMITED_MIN_BLOCKS + 2)))) { LogPrint(BCLog::NET, "Ignore block request below NODE_NETWORK_LIMITED " "threshold from peer=%d\n", pfrom.GetId()); // disconnect node and prevent it from stalling (would otherwise wait // for the missing block) pfrom.fDisconnect = true; send = false; } // Pruned nodes may have deleted the block, so check whether it's available // before trying to send. if (send && pindex->nStatus.hasData()) { std::shared_ptr pblock; if (a_recent_block && a_recent_block->GetHash() == pindex->GetBlockHash()) { pblock = a_recent_block; } else { // Send block from disk std::shared_ptr pblockRead = std::make_shared(); if (!ReadBlockFromDisk(*pblockRead, pindex, consensusParams)) { assert(!"cannot load block from disk"); } pblock = pblockRead; } if (inv.type == MSG_BLOCK) { connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock)); } else if (inv.type == MSG_FILTERED_BLOCK) { bool sendMerkleBlock = false; CMerkleBlock merkleBlock; if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_filter); if (pfrom.m_tx_relay->pfilter) { sendMerkleBlock = true; merkleBlock = CMerkleBlock(*pblock, *pfrom.m_tx_relay->pfilter); } } if (sendMerkleBlock) { connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock)); // CMerkleBlock just contains hashes, so also push any // transactions in the block the client did not see. This avoids // hurting performance by pointlessly requiring a round-trip. // Note that there is currently no way for a node to request any // single transactions we didn't send here - they must either // disconnect and retry or request the full block. 
Thus, the // protocol spec specified allows for us to provide duplicate // txn here, however we MUST always provide at least what the // remote peer needs. typedef std::pair PairType; for (PairType &pair : merkleBlock.vMatchedTxn) { connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::TX, *pblock->vtx[pair.first])); } } // else // no response } else if (inv.type == MSG_CMPCT_BLOCK) { // If a peer is asking for old blocks, we're almost guaranteed they // won't have a useful mempool to match against a compact block, and // we don't feel like constructing the object for them, so instead // we respond with the full, non-compact block. int nSendFlags = 0; if (CanDirectFetch(consensusParams) && pindex->nHeight >= ::ChainActive().Height() - MAX_CMPCTBLOCK_DEPTH) { CBlockHeaderAndShortTxIDs cmpctblock(*pblock); connman.PushMessage( &pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } else { connman.PushMessage( &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock)); } } // Trigger the peer node to send a getblocks request for the next batch // of inventory. if (hash == pfrom.hashContinue) { // Bypass PushBlockInventory, this must send even if redundant, and // we want it right after the last block so they don't wait for // other stuff first. std::vector vInv; vInv.push_back( CInv(MSG_BLOCK, ::ChainActive().Tip()->GetBlockHash())); connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::INV, vInv)); pfrom.hashContinue = BlockHash(); } } } //! Determine whether or not a peer can request a transaction, and return it (or //! nullptr if not found or not allowed). CTransactionRef static FindTxForGetData( const CNode &peer, const TxId &txid, const std::chrono::seconds mempool_req, const std::chrono::seconds longlived_mempool_time) LOCKS_EXCLUDED(cs_main) { // Check if the requested transaction is so recent that we're just // about to announce it to the peer; if so, they certainly shouldn't // know we already have it. { LOCK(peer.m_tx_relay->cs_tx_inventory); if (peer.m_tx_relay->setInventoryTxToSend.count(txid)) { return {}; } } { LOCK(cs_main); // Look up transaction in relay pool auto mi = mapRelay.find(txid); if (mi != mapRelay.end()) { return mi->second; } } auto txinfo = g_mempool.info(txid); if (txinfo.tx) { // To protect privacy, do not answer getdata using the mempool when // that TX couldn't have been INVed in reply to a MEMPOOL request, // or when it's too recent to have expired from mapRelay. if ((mempool_req.count() && txinfo.m_time <= mempool_req) || txinfo.m_time <= longlived_mempool_time) { return txinfo.tx; } } return {}; } static void ProcessGetData(const Config &config, CNode &pfrom, CConnman &connman, CTxMemPool &mempool, const std::atomic &interruptMsgProc) LOCKS_EXCLUDED(cs_main) { AssertLockNotHeld(cs_main); std::deque::iterator it = pfrom.vRecvGetData.begin(); std::vector vNotFound; const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); // mempool entries added before this time have likely expired from mapRelay const std::chrono::seconds longlived_mempool_time = GetTime() - RELAY_TX_CACHE_TIME; // Get last mempool request time const std::chrono::seconds mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load() : std::chrono::seconds::min(); // Process as many TX items from the front of the getdata queue as // possible, since they're common and it's efficient to batch process // them. while (it != pfrom.vRecvGetData.end() && it->type == MSG_TX) { if (interruptMsgProc) { return; } // The send buffer provides backpressure. 
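// Minimal standalone sketch of the privacy rule in FindTxForGetData above: a
// transaction that is still only in the mempool is served in response to
// GETDATA only if the peer could already have learned about it, i.e. it was
// in the mempool at the time of the peer's last MEMPOOL request, or it is old
// enough that it would have been relayed and expired from mapRelay. The cache
// window constant here is an illustrative stand-in.
#include <chrono>

namespace getdata_privacy_sketch {
constexpr std::chrono::seconds TX_CACHE_WINDOW{15 * 60};

bool MayServeFromMempool(std::chrono::seconds tx_time,
                         std::chrono::seconds last_mempool_req,
                         std::chrono::seconds now) {
    const auto longlived_cutoff = now - TX_CACHE_WINDOW;
    const bool known_via_mempool_msg =
        last_mempool_req.count() > 0 && tx_time <= last_mempool_req;
    const bool old_enough = tx_time <= longlived_cutoff;
    return known_via_mempool_msg || old_enough;
}
} // namespace getdata_privacy_sketch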
If there's no space in // the buffer, pause processing until the next call. if (pfrom.fPauseSend) { break; } const CInv &inv = *it++; if (pfrom.m_tx_relay == nullptr) { // Ignore GETDATA requests for transactions from blocks-only // peers. continue; } CTransactionRef tx = FindTxForGetData( pfrom, TxId{inv.hash}, mempool_req, longlived_mempool_time); if (tx) { int nSendFlags = 0; connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); mempool.RemoveUnbroadcastTx(TxId(inv.hash)); } else { vNotFound.push_back(inv); } } // Only process one BLOCK item per call, since they're uncommon and can be // expensive to process. if (it != pfrom.vRecvGetData.end() && !pfrom.fPauseSend) { const CInv &inv = *it++; if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK) { ProcessGetBlockData(config, pfrom, inv, connman, interruptMsgProc); } // else: If the first item on the queue is an unknown type, we erase it // and continue processing the queue on the next call. } pfrom.vRecvGetData.erase(pfrom.vRecvGetData.begin(), it); if (!vNotFound.empty()) { // Let the peer know that we didn't find what it asked for, so it // doesn't have to wait around forever. SPV clients care about this // message: it's needed when they are recursively walking the // dependencies of relevant unconfirmed transactions. SPV clients want // to do that because they want to know about (and store and rebroadcast // and risk analyze) the dependencies of transactions relevant to them, // without having to download the entire memory pool. Also, other nodes // can use these messages to automatically request a transaction from // some other peer that annnounced it, and stop waiting for us to // respond. In normal operation, we often send NOTFOUND messages for // parents of transactions that we relay; if a peer is missing a parent, // they may assume we have them and request the parents from us. connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); } } void PeerManager::SendBlockTransactions(CNode &pfrom, const CBlock &block, const BlockTransactionsRequest &req) { BlockTransactions resp(req); for (size_t i = 0; i < req.indices.size(); i++) { if (req.indices[i] >= block.vtx.size()) { Misbehaving(pfrom, 100, "getblocktxn with out-of-bounds tx indices"); return; } resp.txn[i] = block.vtx[req.indices[i]]; } LOCK(cs_main); const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); int nSendFlags = 0; m_connman.PushMessage( &pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp)); } void PeerManager::ProcessHeadersMessage( const Config &config, CNode &pfrom, const std::vector &headers, bool via_compact_block) { const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); size_t nCount = headers.size(); if (nCount == 0) { // Nothing interesting. Stop asking this peers for more headers. return; } bool received_new_header = false; const CBlockIndex *pindexLast = nullptr; { LOCK(cs_main); CNodeState *nodestate = State(pfrom.GetId()); // If this looks like it could be a block announcement (nCount < // MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that // don't connect: // - Send a getheaders message in response to try to connect the chain. // - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that // don't connect before giving DoS points // - Once a headers message is received that is valid and does connect, // nUnconnectingHeaders gets reset back to 0. 
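// Minimal standalone sketch of the unconnecting-headers bookkeeping described
// above and handled just below: every announcement whose previous block we do
// not know bumps a per-peer counter and triggers a getheaders to try to
// bridge the gap, only every N-th occurrence costs misbehavior points, and a
// headers message that does connect resets the counter. The constant and the
// callbacks are illustrative stand-ins.
#include <functional>

namespace unconnecting_headers_sketch {
constexpr int MAX_UNCONNECTING_HEADERS_SKETCH = 10;

struct HeadersSyncState {
    int unconnecting_headers = 0;
};

void OnUnconnectingAnnouncement(HeadersSyncState &state,
                                const std::function<void()> &send_getheaders,
                                const std::function<void(int)> &misbehaving) {
    state.unconnecting_headers++;
    send_getheaders(); // try to connect the announced header to our chain
    if (state.unconnecting_headers % MAX_UNCONNECTING_HEADERS_SKETCH == 0) {
        misbehaving(20); // the peer keeps announcing headers we cannot attach
    }
}

void OnConnectingHeaders(HeadersSyncState &state) {
    state.unconnecting_headers = 0;
}
} // namespace unconnecting_headers_sketch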
if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) { nodestate->nUnconnectingHeaders++; m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256())); LogPrint( BCLog::NET, "received header %s: missing prev block %s, sending getheaders " "(%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", headers[0].GetHash().ToString(), headers[0].hashPrevBlock.ToString(), pindexBestHeader->nHeight, pfrom.GetId(), nodestate->nUnconnectingHeaders); // Set hashLastUnknownBlock for this peer, so that if we eventually // get the headers - even from a different peer - we can use this // peer to download. UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()); if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) { // The peer is sending us many headers we can't connect. Misbehaving(pfrom, 20, strprintf("%d non-connecting headers", nodestate->nUnconnectingHeaders)); } return; } BlockHash hashLastBlock; for (const CBlockHeader &header : headers) { if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) { Misbehaving(pfrom, 20, "non-continuous headers sequence"); return; } hashLastBlock = header.GetHash(); } // If we don't have the last header, then they'll have given us // something new (if these headers are valid). if (!LookupBlockIndex(hashLastBlock)) { received_new_header = true; } } BlockValidationState state; if (!m_chainman.ProcessNewBlockHeaders(config, headers, state, &pindexLast)) { if (state.IsInvalid()) { MaybePunishNodeForBlock(pfrom.GetId(), state, via_compact_block, "invalid header received"); return; } } { LOCK(cs_main); CNodeState *nodestate = State(pfrom.GetId()); if (nodestate->nUnconnectingHeaders > 0) { LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom.GetId(), nodestate->nUnconnectingHeaders); } nodestate->nUnconnectingHeaders = 0; assert(pindexLast); UpdateBlockAvailability(pfrom.GetId(), pindexLast->GetBlockHash()); // From here, pindexBestKnownBlock should be guaranteed to be non-null, // because it is set in UpdateBlockAvailability. Some nullptr checks are // still present, however, as belt-and-suspenders. if (received_new_header && pindexLast->nChainWork > ::ChainActive().Tip()->nChainWork) { nodestate->m_last_block_announcement = GetTime(); } if (nCount == MAX_HEADERS_RESULTS) { // Headers message had its maximum size; the peer may have more // headers. // TODO: optimize: if pindexLast is an ancestor of // ::ChainActive().Tip or pindexBestHeader, continue from there // instead. LogPrint( BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom.GetId(), pfrom.nStartingHeight); m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexLast), uint256())); } bool fCanDirectFetch = CanDirectFetch(m_chainparams.GetConsensus()); // If this set of headers is valid and ends in a block with at least as // much work as our tip, download as much as possible. if (fCanDirectFetch && pindexLast->IsValid(BlockValidity::TREE) && ::ChainActive().Tip()->nChainWork <= pindexLast->nChainWork) { std::vector vToFetch; const CBlockIndex *pindexWalk = pindexLast; // Calculate all the blocks we'd need to switch to pindexLast, up to // a limit. 
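// Minimal standalone sketch of the continuity check performed above: within a
// single headers message, each header's hashPrevBlock must equal the hash of
// the header before it, otherwise the whole batch is treated as misbehavior.
// HeaderSketch is a simplified stand-in for CBlockHeader.
#include <string>
#include <vector>

namespace headers_continuity_sketch {
struct HeaderSketch {
    std::string hash;      // would be header.GetHash()
    std::string prev_hash; // would be header.hashPrevBlock
};

bool IsContinuous(const std::vector<HeaderSketch> &headers) {
    std::string last_hash;
    for (const auto &header : headers) {
        if (!last_hash.empty() && header.prev_hash != last_hash) {
            return false; // non-continuous headers sequence
        }
        last_hash = header.hash;
    }
    return true;
}
} // namespace headers_continuity_sketch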
while (pindexWalk && !::ChainActive().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { if (!pindexWalk->nStatus.hasData() && !mapBlocksInFlight.count(pindexWalk->GetBlockHash())) { // We don't have this block, and it's not yet in flight. vToFetch.push_back(pindexWalk); } pindexWalk = pindexWalk->pprev; } // If pindexWalk still isn't on our main chain, we're looking at a // very large reorg at a time we think we're close to caught up to // the main chain -- this shouldn't really happen. Bail out on the // direct fetch and rely on parallel download instead. if (!::ChainActive().Contains(pindexWalk)) { LogPrint( BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n", pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); } else { std::vector vGetData; // Download as much as possible, from earliest to latest. for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) { if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { // Can't download any more from this peer break; } vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); MarkBlockAsInFlight(config, m_mempool, pfrom.GetId(), pindex->GetBlockHash(), m_chainparams.GetConsensus(), pindex); LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", pindex->GetBlockHash().ToString(), pfrom.GetId()); } if (vGetData.size() > 1) { LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers " "direct fetch\n", pindexLast->GetBlockHash().ToString(), pindexLast->nHeight); } if (vGetData.size() > 0) { if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BlockValidity::CHAIN)) { // In any case, we want to download using a compact // block, not a regular one. vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash); } m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } } } // If we're in IBD, we want outbound peers that will serve us a useful // chain. Disconnect peers that are on chains with insufficient work. if (::ChainstateActive().IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) { // When nCount < MAX_HEADERS_RESULTS, we know we have no more // headers to fetch from this peer. if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) { // This peer has too little work on their headers chain to help // us sync -- disconnect if using an outbound slot (unless // whitelisted or addnode). // Note: We compare their tip to nMinimumChainWork (rather than // ::ChainActive().Tip()) because we won't start block download // until we have a headers chain that has at least // nMinimumChainWork, even if a peer has a chain past our tip, // as an anti-DoS measure. if (pfrom.IsOutboundOrBlockRelayConn()) { LogPrintf("Disconnecting outbound peer %d -- headers " "chain has insufficient work\n", pfrom.GetId()); pfrom.fDisconnect = true; } } } if (!pfrom.fDisconnect && pfrom.IsOutboundOrBlockRelayConn() && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) { // If this is an outbound full-relay peer, check to see if we should // protect it from the bad/lagging chain logic. Note that // block-relay-only peers are already implicitly protected, so we // only consider setting m_protect for the full-relay peers. 
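// Minimal standalone sketch of the eviction-protection rule described above
// and applied just below: an outbound full-relay peer is marked protected
// only while the global count of protected peers is below the cap and its
// best known block is at least as good as our tip. The cap is illustrative
// and heights stand in for the chain-work comparison used by the real code.
namespace outbound_protect_sketch {
constexpr int MAX_PROTECTED_OUTBOUND_PEERS = 4;

struct PeerSyncState {
    bool protected_from_disconnect = false;
    int best_known_height = -1;
};

bool MaybeProtect(PeerSyncState &peer, int our_tip_height,
                  int &protected_count) {
    if (peer.protected_from_disconnect) {
        return false; // already protected
    }
    if (protected_count >= MAX_PROTECTED_OUTBOUND_PEERS) {
        return false; // protection slots exhausted
    }
    if (peer.best_known_height < our_tip_height) {
        return false; // lagging chain, not worth protecting
    }
    peer.protected_from_disconnect = true;
    ++protected_count;
    return true;
}
} // namespace outbound_protect_sketch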
if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId()); nodestate->m_chain_sync.m_protect = true; ++g_outbound_peers_with_protect_from_disconnect; } } } } void PeerManager::ProcessOrphanTx(const Config &config, std::set &orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans) { AssertLockHeld(cs_main); AssertLockHeld(g_cs_orphans); std::unordered_map rejectCountPerNode; bool done = false; while (!done && !orphan_work_set.empty()) { const TxId orphanTxId = *orphan_work_set.begin(); orphan_work_set.erase(orphan_work_set.begin()); auto orphan_it = mapOrphanTransactions.find(orphanTxId); if (orphan_it == mapOrphanTransactions.end()) { continue; } const CTransactionRef porphanTx = orphan_it->second.tx; const CTransaction &orphanTx = *porphanTx; NodeId fromPeer = orphan_it->second.fromPeer; // Use a new TxValidationState because orphans come from different peers // (and we call MaybePunishNodeForTx based on the source peer from the // orphan map, not based on the peer that relayed the previous // transaction). TxValidationState orphan_state; auto it = rejectCountPerNode.find(fromPeer); if (it != rejectCountPerNode.end() && it->second > MAX_NON_STANDARD_ORPHAN_PER_NODE) { continue; } if (AcceptToMemoryPool(config, m_mempool, orphan_state, porphanTx, false /* bypass_limits */, Amount::zero() /* nAbsurdFee */)) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanTxId.ToString()); RelayTransaction(orphanTxId, m_connman); for (size_t i = 0; i < orphanTx.vout.size(); i++) { auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(orphanTxId, i)); if (it_by_prev != mapOrphanTransactionsByPrev.end()) { for (const auto &elem : it_by_prev->second) { orphan_work_set.insert(elem->first); } } } EraseOrphanTx(orphanTxId); done = true; } else if (orphan_state.GetResult() != TxValidationResult::TX_MISSING_INPUTS) { if (orphan_state.IsInvalid()) { // Punish peer that gave us an invalid orphan tx MaybePunishNodeForTx(fromPeer, orphan_state); LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanTxId.ToString()); } // Has inputs but not accepted to mempool // Probably non-standard or insufficient fee LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanTxId.ToString()); assert(recentRejects); recentRejects->insert(orphanTxId); EraseOrphanTx(orphanTxId); done = true; } m_mempool.check(&::ChainstateActive().CoinsTip()); } } /** * Validation logic for compact filters request handling. * * May disconnect from the peer in the case of a bad request. * * @param[in] peer The peer that we received the request from * @param[in] chain_params Chain parameters * @param[in] filter_type The filter type the request is for. Must be * basic filters. * @param[in] start_height The start height for the request * @param[in] stop_hash The stop_hash for the request * @param[in] max_height_diff The maximum number of items permitted to * request, as specified in BIP 157 * @param[out] stop_index The CBlockIndex for the stop_hash block, if the * request can be serviced. * @param[out] filter_index The filter index, if the request can be * serviced. * @return True if the request can be serviced. 
*/ static bool PrepareBlockFilterRequest( CNode &peer, const CChainParams &chain_params, BlockFilterType filter_type, uint32_t start_height, const BlockHash &stop_hash, uint32_t max_height_diff, const CBlockIndex *&stop_index, BlockFilterIndex *&filter_index) { const bool supported_filter_type = (filter_type == BlockFilterType::BASIC && (peer.GetLocalServices() & NODE_COMPACT_FILTERS)); if (!supported_filter_type) { LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n", peer.GetId(), static_cast(filter_type)); peer.fDisconnect = true; return false; } { LOCK(cs_main); stop_index = LookupBlockIndex(stop_hash); // Check that the stop block exists and the peer would be allowed to // fetch it. if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) { LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n", peer.GetId(), stop_hash.ToString()); peer.fDisconnect = true; return false; } } uint32_t stop_height = stop_index->nHeight; if (start_height > stop_height) { LogPrint( BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */ "start height %d and stop height %d\n", peer.GetId(), start_height, stop_height); peer.fDisconnect = true; return false; } if (stop_height - start_height >= max_height_diff) { LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n", peer.GetId(), stop_height - start_height + 1, max_height_diff); peer.fDisconnect = true; return false; } filter_index = GetBlockFilterIndex(filter_type); if (!filter_index) { LogPrint(BCLog::NET, "Filter index for supported type %s not found\n", BlockFilterTypeName(filter_type)); return false; } return true; } /** * Handle a cfilters request. * * May disconnect from the peer in the case of a bad request. * * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received * @param[in] chain_params Chain parameters * @param[in] connman Pointer to the connection manager */ static void ProcessGetCFilters(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman) { uint8_t filter_type_ser; uint32_t start_height; BlockHash stop_hash; vRecv >> filter_type_ser >> start_height >> stop_hash; const BlockFilterType filter_type = static_cast(filter_type_ser); const CBlockIndex *stop_index; BlockFilterIndex *filter_index; if (!PrepareBlockFilterRequest( peer, chain_params, filter_type, start_height, stop_hash, MAX_GETCFILTERS_SIZE, stop_index, filter_index)) { return; } std::vector filters; if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) { LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, " "start_height=%d, stop_hash=%s\n", BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); return; } for (const auto &filter : filters) { CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion()) .Make(NetMsgType::CFILTER, filter); connman.PushMessage(&peer, std::move(msg)); } } /** * Handle a cfheaders request. * * May disconnect from the peer in the case of a bad request. 
* * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received * @param[in] chain_params Chain parameters * @param[in] connman Pointer to the connection manager */ static void ProcessGetCFHeaders(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman) { uint8_t filter_type_ser; uint32_t start_height; BlockHash stop_hash; vRecv >> filter_type_ser >> start_height >> stop_hash; const BlockFilterType filter_type = static_cast(filter_type_ser); const CBlockIndex *stop_index; BlockFilterIndex *filter_index; if (!PrepareBlockFilterRequest( peer, chain_params, filter_type, start_height, stop_hash, MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) { return; } uint256 prev_header; if (start_height > 0) { const CBlockIndex *const prev_block = stop_index->GetAncestor(static_cast(start_height - 1)); if (!filter_index->LookupFilterHeader(prev_block, prev_header)) { LogPrint(BCLog::NET, "Failed to find block filter header in index: " "filter_type=%s, block_hash=%s\n", BlockFilterTypeName(filter_type), prev_block->GetBlockHash().ToString()); return; } } std::vector filter_hashes; if (!filter_index->LookupFilterHashRange(start_height, stop_index, filter_hashes)) { LogPrint(BCLog::NET, "Failed to find block filter hashes in index: filter_type=%s, " "start_height=%d, stop_hash=%s\n", BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); return; } CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion()) .Make(NetMsgType::CFHEADERS, filter_type_ser, stop_index->GetBlockHash(), prev_header, filter_hashes); connman.PushMessage(&peer, std::move(msg)); } /** * Handle a getcfcheckpt request. * * May disconnect from the peer in the case of a bad request. * * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received * @param[in] chain_params Chain parameters * @param[in] connman Pointer to the connection manager */ static void ProcessGetCFCheckPt(CNode &peer, CDataStream &vRecv, const CChainParams &chain_params, CConnman &connman) { uint8_t filter_type_ser; BlockHash stop_hash; vRecv >> filter_type_ser >> stop_hash; const BlockFilterType filter_type = static_cast(filter_type_ser); const CBlockIndex *stop_index; BlockFilterIndex *filter_index; if (!PrepareBlockFilterRequest( peer, chain_params, filter_type, /*start_height=*/0, stop_hash, /*max_height_diff=*/std::numeric_limits::max(), stop_index, filter_index)) { return; } std::vector headers(stop_index->nHeight / CFCHECKPT_INTERVAL); // Populate headers. 
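    // Entry i holds the filter header at height (i + 1) * CFCHECKPT_INTERVAL;
    // walking back from stop_index via GetAncestor() keeps every lookup on
    // the chain that leads to the requested stop block.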
const CBlockIndex *block_index = stop_index; for (int i = headers.size() - 1; i >= 0; i--) { int height = (i + 1) * CFCHECKPT_INTERVAL; block_index = block_index->GetAncestor(height); if (!filter_index->LookupFilterHeader(block_index, headers[i])) { LogPrint(BCLog::NET, "Failed to find block filter header in index: " "filter_type=%s, block_hash=%s\n", BlockFilterTypeName(filter_type), block_index->GetBlockHash().ToString()); return; } } CSerializedNetMsg msg = CNetMsgMaker(peer.GetCommonVersion()) .Make(NetMsgType::CFCHECKPT, filter_type_ser, stop_index->GetBlockHash(), headers); connman.PushMessage(&peer, std::move(msg)); } void PeerManager::ProcessMessage(const Config &config, CNode &pfrom, const std::string &msg_type, CDataStream &vRecv, const std::chrono::microseconds time_received, const std::atomic &interruptMsgProc) { LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(msg_type), vRecv.size(), pfrom.GetId()); if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0) { LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n"); return; } if (!(pfrom.GetLocalServices() & NODE_BLOOM) && (msg_type == NetMsgType::FILTERLOAD || msg_type == NetMsgType::FILTERADD)) { if (pfrom.nVersion >= NO_BLOOM_VERSION) { Misbehaving(pfrom, 100, "no-bloom-version"); } else { pfrom.fDisconnect = true; } return; } if (msg_type == NetMsgType::VERSION) { // Each connection can only send one version message if (pfrom.nVersion != 0) { Misbehaving(pfrom, 1, "redundant version message"); return; } int64_t nTime; CAddress addrMe; CAddress addrFrom; uint64_t nNonce = 1; uint64_t nServiceInt; ServiceFlags nServices; int nVersion; std::string cleanSubVer; int nStartingHeight = -1; bool fRelay = true; uint64_t nExtraEntropy = 1; vRecv >> nVersion >> nServiceInt >> nTime >> addrMe; nServices = ServiceFlags(nServiceInt); if (!pfrom.IsInboundConn()) { m_connman.SetServices(pfrom.addr, nServices); } if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices)) { LogPrint(BCLog::NET, "peer=%d does not offer the expected services " "(%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices)); pfrom.fDisconnect = true; return; } if (nVersion < MIN_PEER_PROTO_VERSION) { // disconnect from peers older than this proto version LogPrint(BCLog::NET, "peer=%d using obsolete version %i; disconnecting\n", pfrom.GetId(), nVersion); pfrom.fDisconnect = true; return; } if (!vRecv.empty()) { vRecv >> addrFrom >> nNonce; } if (!vRecv.empty()) { std::string strSubVer; vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH); cleanSubVer = SanitizeString(strSubVer); } if (!vRecv.empty()) { vRecv >> nStartingHeight; } if (!vRecv.empty()) { vRecv >> fRelay; } if (!vRecv.empty()) { vRecv >> nExtraEntropy; } // Disconnect if we connected to ourself if (pfrom.IsInboundConn() && !m_connman.CheckIncomingNonce(nNonce)) { LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString()); pfrom.fDisconnect = true; return; } if (pfrom.IsInboundConn() && addrMe.IsRoutable()) { SeenLocal(addrMe); } // Be shy and don't send version until we hear if (pfrom.IsInboundConn()) { PushNodeVersion(config, pfrom, m_connman, GetAdjustedTime()); } // Change version const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION); pfrom.SetCommonVersion(greatest_common_version); pfrom.nVersion = nVersion; - m_connman.PushMessage( - &pfrom, - CNetMsgMaker(greatest_common_version).Make(NetMsgType::VERACK)); + const CNetMsgMaker 
msg_maker(greatest_common_version); + + m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK)); + + // Signal ADDRv2 support (BIP155). + m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2)); pfrom.nServices = nServices; pfrom.SetAddrLocal(addrMe); { LOCK(pfrom.cs_SubVer); pfrom.cleanSubVer = cleanSubVer; } pfrom.nStartingHeight = nStartingHeight; // set nodes not relaying blocks and tx and not serving (parts) of the // historical blockchain as "clients" pfrom.fClient = (!(nServices & NODE_NETWORK) && !(nServices & NODE_NETWORK_LIMITED)); // set nodes not capable of serving the complete blockchain history as // "limited nodes" pfrom.m_limited_node = (!(nServices & NODE_NETWORK) && (nServices & NODE_NETWORK_LIMITED)); if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_filter); // set to true after we get the first filter* message pfrom.m_tx_relay->fRelayTxes = fRelay; } pfrom.nRemoteHostNonce = nNonce; pfrom.nRemoteExtraEntropy = nExtraEntropy; // Potentially mark this peer as a preferred download peer. { LOCK(cs_main); UpdatePreferredDownload(pfrom, State(pfrom.GetId())); } if (!pfrom.IsInboundConn() && pfrom.IsAddrRelayPeer()) { // Advertise our address if (fListen && !::ChainstateActive().IsInitialBlockDownload()) { CAddress addr = GetLocalAddress(&pfrom.addr, pfrom.GetLocalServices()); FastRandomContext insecure_rand; if (addr.IsRoutable()) { LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString()); pfrom.PushAddress(addr, insecure_rand); } else if (IsPeerAddrLocalGood(&pfrom)) { addr.SetIP(addrMe); LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString()); pfrom.PushAddress(addr, insecure_rand); } } // Get recent addresses m_connman.PushMessage(&pfrom, CNetMsgMaker(greatest_common_version) .Make(NetMsgType::GETADDR)); pfrom.fGetAddr = true; m_connman.MarkAddressGood(pfrom.addr); } std::string remoteAddr; if (fLogIPs) { remoteAddr = ", peeraddr=" + pfrom.addr.ToString(); } LogPrint(BCLog::NET, "receive version message: [%s] %s: version %d, blocks=%d, " "us=%s, peer=%d%s\n", pfrom.addr.ToString(), cleanSubVer, pfrom.nVersion, pfrom.nStartingHeight, addrMe.ToString(), pfrom.GetId(), remoteAddr); // Ignore time offsets that are improbable (before the Genesis block) // and may underflow the nTimeOffset calculation. int64_t currentTime = GetTime(); if (nTime >= int64_t(m_chainparams.GenesisBlock().nTime)) { int64_t nTimeOffset = nTime - currentTime; pfrom.nTimeOffset = nTimeOffset; AddTimeData(pfrom.addr, nTimeOffset); } else { Misbehaving(pfrom, 20, "Ignoring invalid timestamp in version message"); } // Feeler connections exist only to verify if address is online. if (pfrom.IsFeelerConn()) { pfrom.fDisconnect = true; } return; } if (pfrom.nVersion == 0) { // Must have a version message before anything else Misbehaving(pfrom, 10, "non-version message before version handshake"); return; } // At this point, the outgoing message serialization version can't change. const CNetMsgMaker msgMaker(pfrom.GetCommonVersion()); if (msg_type == NetMsgType::VERACK) { if (!pfrom.IsInboundConn()) { // Mark this node as currently connected, so we update its timestamp // later. LOCK(cs_main); State(pfrom.GetId())->fCurrentlyConnected = true; LogPrintf( "New outbound peer connected: version: %d, blocks=%d, " "peer=%d%s (%s)\n", pfrom.nVersion.load(), pfrom.nStartingHeight, pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""), pfrom.m_tx_relay == nullptr ? 
"block-relay" : "full-relay"); } if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) { // Tell our peer we prefer to receive headers rather than inv's // We send this to non-NODE NETWORK peers as well, because even // non-NODE NETWORK peers can announce blocks (such as pruning // nodes) m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDHEADERS)); } if (pfrom.GetCommonVersion() >= SHORT_IDS_BLOCKS_VERSION) { // Tell our peer we are willing to provide version 1 or 2 // cmpctblocks. However, we do not request new block announcements // using cmpctblock messages. We send this to non-NODE NETWORK peers // as well, because they may wish to request compact blocks from us. bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 1; m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion)); } if ((pfrom.nServices & NODE_AVALANCHE) && g_avalanche && gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) { if (g_avalanche->sendHello(&pfrom)) { LogPrint(BCLog::NET, "Send avahello to peer %d\n", pfrom.GetId()); } } pfrom.fSuccessfullyConnected = true; return; } if (!pfrom.fSuccessfullyConnected) { // Must have a verack message before anything else Misbehaving(pfrom, 10, "non-verack message before version handshake"); return; } - if (msg_type == NetMsgType::ADDR) { + if (msg_type == NetMsgType::ADDR || msg_type == NetMsgType::ADDRV2) { + int stream_version = vRecv.GetVersion(); + if (msg_type == NetMsgType::ADDRV2) { + // Add ADDRV2_FORMAT to the version so that the CNetAddr and + // CAddress unserialize methods know that an address in v2 format is + // coming. + stream_version |= ADDRV2_FORMAT; + } + + OverrideStream s(&vRecv, vRecv.GetType(), stream_version); std::vector vAddr; - vRecv >> vAddr; + + s >> vAddr; if (!pfrom.IsAddrRelayPeer()) { return; } if (vAddr.size() > 1000) { - Misbehaving(pfrom, 20, - strprintf("oversized-addr: message addr size() = %u", - vAddr.size())); + Misbehaving( + pfrom, 20, + strprintf("%s message size = %u", msg_type, vAddr.size())); return; } // Store the new addresses std::vector vAddrOk; int64_t nNow = GetAdjustedTime(); int64_t nSince = nNow - 10 * 60; for (CAddress &addr : vAddr) { if (interruptMsgProc) { return; } // We only bother storing full nodes, though this may include things // which we would not make an outbound connection to, in part // because we may make feeler connections to them. 
if (!MayHaveUsefulAddressDB(addr.nServices) && !HasAllDesirableServiceFlags(addr.nServices)) { continue; } if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60) { addr.nTime = nNow - 5 * 24 * 60 * 60; } pfrom.AddAddressKnown(addr); if (m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr))) { // Do not process banned/discouraged addresses beyond // remembering we received them continue; } bool fReachable = IsReachable(addr); if (addr.nTime > nSince && !pfrom.fGetAddr && vAddr.size() <= 10 && addr.IsRoutable()) { // Relay to a limited number of other nodes RelayAddress(addr, fReachable, m_connman); } // Do not store addresses outside our network if (fReachable) { vAddrOk.push_back(addr); } } m_connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60); if (vAddr.size() < 1000) { pfrom.fGetAddr = false; } if (pfrom.IsAddrFetchConn()) { pfrom.fDisconnect = true; } return; } + if (msg_type == NetMsgType::SENDADDRV2) { + pfrom.m_wants_addrv2 = true; + return; + } + if (msg_type == NetMsgType::SENDHEADERS) { LOCK(cs_main); State(pfrom.GetId())->fPreferHeaders = true; return; } if (msg_type == NetMsgType::SENDCMPCT) { bool fAnnounceUsingCMPCTBLOCK = false; uint64_t nCMPCTBLOCKVersion = 0; vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion; if (nCMPCTBLOCKVersion == 1) { LOCK(cs_main); // fProvidesHeaderAndIDs is used to "lock in" version of compact // blocks we send. if (!State(pfrom.GetId())->fProvidesHeaderAndIDs) { State(pfrom.GetId())->fProvidesHeaderAndIDs = true; } State(pfrom.GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK; if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) { State(pfrom.GetId())->fSupportsDesiredCmpctVersion = true; } } return; } if (msg_type == NetMsgType::INV) { std::vector vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { Misbehaving(pfrom, 20, strprintf("oversized-inv: message inv size() = %u", vInv.size())); return; } // We won't accept tx inv's if we're in blocks-only mode, or this is a // block-relay-only peer bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr); // Allow whitelisted peers to send data other than blocks in blocks only // mode if whitelistrelay is true if (pfrom.HasPermission(PF_RELAY)) { fBlocksOnly = false; } LOCK(cs_main); const auto current_time = GetTime(); std::optional best_block; for (CInv &inv : vInv) { if (interruptMsgProc) { return; } bool fAlreadyHave = AlreadyHave(inv, m_mempool); LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); if (inv.type == MSG_BLOCK) { const BlockHash hash{inv.hash}; UpdateBlockAvailability(pfrom.GetId(), hash); if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(hash)) { // Headers-first is the primary method of announcement on // the network. If a node fell back to sending blocks by // inv, it's probably for a re-org. The final block hash // provided should be the highest, so send a getheaders and // then fetch the blocks we need to catch up. 
best_block = std::move(hash); } } else { const TxId txid(inv.hash); pfrom.AddKnownTx(txid); if (fBlocksOnly) { LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of " "protocol, disconnecting peer=%d\n", txid.ToString(), pfrom.GetId()); pfrom.fDisconnect = true; return; } else if (!fAlreadyHave && !fImporting && !fReindex && !::ChainstateActive().IsInitialBlockDownload()) { RequestTx(State(pfrom.GetId()), txid, current_time); } } } if (best_block) { m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), *best_block)); LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, best_block->ToString(), pfrom.GetId()); } return; } if (msg_type == NetMsgType::GETDATA) { std::vector vInv; vRecv >> vInv; if (vInv.size() > MAX_INV_SZ) { Misbehaving(pfrom, 20, strprintf("too-many-inv: message getdata size() = %u", vInv.size())); return; } LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom.GetId()); if (vInv.size() > 0) { LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom.GetId()); } pfrom.vRecvGetData.insert(pfrom.vRecvGetData.end(), vInv.begin(), vInv.end()); ProcessGetData(config, pfrom, m_connman, m_mempool, interruptMsgProc); return; } if (msg_type == NetMsgType::GETBLOCKS) { CBlockLocator locator; uint256 hashStop; vRecv >> locator >> hashStop; if (locator.vHave.size() > MAX_LOCATOR_SZ) { LogPrint(BCLog::NET, "getblocks locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId()); pfrom.fDisconnect = true; return; } // We might have announced the currently-being-connected tip using a // compact block, which resulted in the peer sending a getblocks // request, which we would otherwise respond to without the new block. // To avoid this situation we simply verify that we are on our best // known chain now. This is super overkill, but we handle it better // for getheaders requests, and there are no known nodes which support // compact blocks but still use getblocks to request blocks. { std::shared_ptr a_recent_block; { LOCK(cs_most_recent_block); a_recent_block = most_recent_block; } BlockValidationState state; if (!ActivateBestChain(config, state, a_recent_block)) { LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); } } LOCK(cs_main); // Find the last block the caller has in the main chain const CBlockIndex *pindex = FindForkInGlobalIndex(::ChainActive(), locator); // Send the rest of the chain if (pindex) { pindex = ::ChainActive().Next(pindex); } int nLimit = 500; LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom.GetId()); for (; pindex; pindex = ::ChainActive().Next(pindex)) { if (pindex->GetBlockHash() == hashStop) { LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } // If pruning, don't inv blocks unless we have on disk and are // likely to still have for some reasonable time window (1 hour) // that block relay might require. 
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / m_chainparams.GetConsensus().nPowTargetSpacing; if (fPruneMode && (!pindex->nStatus.hasData() || pindex->nHeight <= ::ChainActive().Tip()->nHeight - nPrunedBlocksLikelyToHave)) { LogPrint( BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); break; } pfrom.PushBlockInventory(pindex->GetBlockHash()); if (--nLimit <= 0) { // When this block is requested, we'll send an inv that'll // trigger the peer to getblocks the next batch of inventory. LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); pfrom.hashContinue = pindex->GetBlockHash(); break; } } return; } if (msg_type == NetMsgType::GETBLOCKTXN) { BlockTransactionsRequest req; vRecv >> req; std::shared_ptr recent_block; { LOCK(cs_most_recent_block); if (most_recent_block_hash == req.blockhash) { recent_block = most_recent_block; } // Unlock cs_most_recent_block to avoid cs_main lock inversion } if (recent_block) { SendBlockTransactions(pfrom, *recent_block, req); return; } LOCK(cs_main); const CBlockIndex *pindex = LookupBlockIndex(req.blockhash); if (!pindex || !pindex->nStatus.hasData()) { LogPrint( BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId()); return; } if (pindex->nHeight < ::ChainActive().Height() - MAX_BLOCKTXN_DEPTH) { // If an older block is requested (should never happen in practice, // but can happen in tests) send a block response instead of a // blocktxn response. Sending a full block response instead of a // small blocktxn response is preferable in the case where a peer // might maliciously send lots of getblocktxn requests to trigger // expensive disk reads, because it will require the peer to // actually receive all the data read from disk over the network. 
LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep\n", pfrom.GetId(), MAX_BLOCKTXN_DEPTH); CInv inv; inv.type = MSG_BLOCK; inv.hash = req.blockhash; pfrom.vRecvGetData.push_back(inv); // The message processing loop will go around again (without // pausing) and we'll respond then (without cs_main) return; } CBlock block; bool ret = ReadBlockFromDisk(block, pindex, m_chainparams.GetConsensus()); assert(ret); SendBlockTransactions(pfrom, block, req); return; } if (msg_type == NetMsgType::GETHEADERS) { CBlockLocator locator; BlockHash hashStop; vRecv >> locator >> hashStop; if (locator.vHave.size() > MAX_LOCATOR_SZ) { LogPrint(BCLog::NET, "getheaders locator size %lld > %d, disconnect peer=%d\n", locator.vHave.size(), MAX_LOCATOR_SZ, pfrom.GetId()); pfrom.fDisconnect = true; return; } LOCK(cs_main); if (::ChainstateActive().IsInitialBlockDownload() && !pfrom.HasPermission(PF_NOBAN)) { LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in " "initial block download\n", pfrom.GetId()); return; } CNodeState *nodestate = State(pfrom.GetId()); const CBlockIndex *pindex = nullptr; if (locator.IsNull()) { // If locator is null, return the hashStop block pindex = LookupBlockIndex(hashStop); if (!pindex) { return; } if (!BlockRequestAllowed(pindex, m_chainparams.GetConsensus())) { LogPrint(BCLog::NET, "%s: ignoring request from peer=%i for old block " "header that isn't in the main chain\n", __func__, pfrom.GetId()); return; } } else { // Find the last block the caller has in the main chain pindex = FindForkInGlobalIndex(::ChainActive(), locator); if (pindex) { pindex = ::ChainActive().Next(pindex); } } // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx // count at the end std::vector vHeaders; int nLimit = MAX_HEADERS_RESULTS; LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom.GetId()); for (; pindex; pindex = ::ChainActive().Next(pindex)) { vHeaders.push_back(pindex->GetBlockHeader()); if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop) { break; } } // pindex can be nullptr either if we sent ::ChainActive().Tip() OR // if our peer has ::ChainActive().Tip() (and thus we are sending an // empty headers message). In both cases it's safe to update // pindexBestHeaderSent to be our tip. // // It is important that we simply reset the BestHeaderSent value here, // and not max(BestHeaderSent, newHeaderSent). We might have announced // the currently-being-connected tip using a compact block, which // resulted in the peer sending a headers request, which we respond to // without the new block. By resetting the BestHeaderSent, we ensure we // will re-announce the new block via headers (or compact blocks again) // in the SendMessages logic. nodestate->pindexBestHeaderSent = pindex ? 
pindex : ::ChainActive().Tip(); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); return; } if (msg_type == NetMsgType::TX) { // Stop processing the transaction early if // 1) We are in blocks only mode and peer has no relay permission // 2) This peer is a block-relay-only peer if ((!g_relay_txes && !pfrom.HasPermission(PF_RELAY)) || (pfrom.m_tx_relay == nullptr)) { LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId()); pfrom.fDisconnect = true; return; } CTransactionRef ptx; vRecv >> ptx; const CTransaction &tx = *ptx; const TxId &txid = tx.GetId(); pfrom.AddKnownTx(txid); LOCK2(cs_main, g_cs_orphans); TxValidationState state; CNodeState *nodestate = State(pfrom.GetId()); nodestate->m_tx_download.m_tx_announced.erase(txid); nodestate->m_tx_download.m_tx_in_flight.erase(txid); EraseTxRequest(txid); if (!AlreadyHave(CInv(MSG_TX, txid), m_mempool) && AcceptToMemoryPool(config, m_mempool, state, ptx, false /* bypass_limits */, Amount::zero() /* nAbsurdFee */)) { m_mempool.check(&::ChainstateActive().CoinsTip()); RelayTransaction(tx.GetId(), m_connman); for (size_t i = 0; i < tx.vout.size(); i++) { auto it_by_prev = mapOrphanTransactionsByPrev.find(COutPoint(txid, i)); if (it_by_prev != mapOrphanTransactionsByPrev.end()) { for (const auto &elem : it_by_prev->second) { pfrom.orphan_work_set.insert(elem->first); } } } pfrom.nLastTXTime = GetTime(); LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s " "(poolsz %u txn, %u kB)\n", pfrom.GetId(), tx.GetId().ToString(), m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000); // Recursively process any orphan transactions that depended on this // one ProcessOrphanTx(config, pfrom.orphan_work_set); } else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { // It may be the case that the orphans parents have all been // rejected. bool fRejectedParents = false; for (const CTxIn &txin : tx.vin) { if (recentRejects->contains(txin.prevout.GetTxId())) { fRejectedParents = true; break; } } if (!fRejectedParents) { const auto current_time = GetTime(); for (const CTxIn &txin : tx.vin) { // FIXME: MSG_TX should use a TxHash, not a TxId. const TxId _txid = txin.prevout.GetTxId(); pfrom.AddKnownTx(_txid); if (!AlreadyHave(CInv(MSG_TX, _txid), m_mempool)) { RequestTx(State(pfrom.GetId()), _txid, current_time); } } AddOrphanTx(ptx, pfrom.GetId()); // DoS prevention: do not allow mapOrphanTransactions to grow // unbounded (see CVE-2012-3789) unsigned int nMaxOrphanTx = (unsigned int)std::max( int64_t(0), gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS)); unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx); if (nEvicted > 0) { LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted); } } else { LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n", tx.GetId().ToString()); // We will continue to reject this tx since it has rejected // parents so avoid re-requesting it from other peers. recentRejects->insert(tx.GetId()); } } else { assert(recentRejects); recentRejects->insert(tx.GetId()); if (RecursiveDynamicUsage(*ptx) < 100000) { AddToCompactExtraTransactions(ptx); } if (pfrom.HasPermission(PF_FORCERELAY)) { // Always relay transactions received from whitelisted peers, // even if they were already in the mempool or rejected from it // due to policy, allowing the node to function as a gateway for // nodes hidden behind it. // // Never relay transactions that might result in being // disconnected (or banned). 
if (state.IsInvalid() && TxRelayMayResultInDisconnect(state)) { LogPrintf("Not relaying invalid transaction %s from " "whitelisted peer=%d (%s)\n", tx.GetId().ToString(), pfrom.GetId(), state.ToString()); } else { LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetId().ToString(), pfrom.GetId()); RelayTransaction(tx.GetId(), m_connman); } } } // If a tx has been detected by recentRejects, we will have reached // this point and the tx will have been ignored. Because we haven't run // the tx through AcceptToMemoryPool, we won't have computed a DoS // score for it or determined exactly why we consider it invalid. // // This means we won't penalize any peer subsequently relaying a DoSy // tx (even if we penalized the first peer who gave it to us) because // we have to account for recentRejects showing false positives. In // other words, we shouldn't penalize a peer if we aren't *sure* they // submitted a DoSy tx. // // Note that recentRejects doesn't just record DoSy or invalid // transactions, but any tx not accepted by the mempool, which may be // due to node policy (vs. consensus). So we can't blanket penalize a // peer simply for relaying a tx that our recentRejects has caught, // regardless of false positives. if (state.IsInvalid()) { LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(), pfrom.GetId(), state.ToString()); MaybePunishNodeForTx(pfrom.GetId(), state); } return; } if (msg_type == NetMsgType::CMPCTBLOCK) { // Ignore cmpctblock received while importing if (fImporting || fReindex) { LogPrint(BCLog::NET, "Unexpected cmpctblock message received from peer %d\n", pfrom.GetId()); return; } CBlockHeaderAndShortTxIDs cmpctblock; vRecv >> cmpctblock; bool received_new_header = false; { LOCK(cs_main); if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) { // Doesn't connect (or is genesis), instead of DoSing in // AcceptBlockHeader, request deeper headers if (!::ChainstateActive().IsInitialBlockDownload()) { m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator( pindexBestHeader), uint256())); } return; } if (!LookupBlockIndex(cmpctblock.header.GetHash())) { received_new_header = true; } } const CBlockIndex *pindex = nullptr; BlockValidationState state; if (!m_chainman.ProcessNewBlockHeaders(config, {cmpctblock.header}, state, &pindex)) { if (state.IsInvalid()) { MaybePunishNodeForBlock(pfrom.GetId(), state, /*via_compact_block*/ true, "invalid header via cmpctblock"); return; } } // When we succeed in decoding a block's txids from a cmpctblock // message we typically jump to the BLOCKTXN handling code, with a // dummy (empty) BLOCKTXN message, to re-use the logic there in // completing processing of the putative block (without cs_main). bool fProcessBLOCKTXN = false; CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION); // If we end up treating this as a plain headers message, call that as // well // without cs_main. 
bool fRevertToHeaderProcessing = false; // Keep a CBlock for "optimistic" compactblock reconstructions (see // below) std::shared_ptr pblock = std::make_shared(); bool fBlockReconstructed = false; { LOCK2(cs_main, g_cs_orphans); // If AcceptBlockHeader returned true, it set pindex assert(pindex); UpdateBlockAvailability(pfrom.GetId(), pindex->GetBlockHash()); CNodeState *nodestate = State(pfrom.GetId()); // If this was a new header with more work than our tip, update the // peer's last block announcement time if (received_new_header && pindex->nChainWork > ::ChainActive().Tip()->nChainWork) { nodestate->m_last_block_announcement = GetTime(); } std::map::iterator>>:: iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash()); bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end(); if (pindex->nStatus.hasData()) { // Nothing to do here return; } if (pindex->nChainWork <= ::ChainActive() .Tip() ->nChainWork || // We know something better pindex->nTx != 0) { // We had this block at some point, but pruned it if (fAlreadyInFlight) { // We requested this block for some reason, but our mempool // will probably be useless so we just grab the block via // normal getdata. std::vector vInv(1); vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash()); m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); } return; } // If we're not close to tip yet, give up and let parallel block // fetch work its magic. if (!fAlreadyInFlight && !CanDirectFetch(m_chainparams.GetConsensus())) { return; } // We want to be a bit conservative just to be extra careful about // DoS possibilities in compact block processing... if (pindex->nHeight <= ::ChainActive().Height() + 2) { if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) { std::list::iterator *queuedBlockIt = nullptr; if (!MarkBlockAsInFlight(config, m_mempool, pfrom.GetId(), pindex->GetBlockHash(), m_chainparams.GetConsensus(), pindex, &queuedBlockIt)) { if (!(*queuedBlockIt)->partialBlock) { (*queuedBlockIt) ->partialBlock.reset( new PartiallyDownloadedBlock(config, &m_mempool)); } else { // The block was already in flight using compact // blocks from the same peer. LogPrint(BCLog::NET, "Peer sent us compact block " "we were already syncing!\n"); return; } } PartiallyDownloadedBlock &partialBlock = *(*queuedBlockIt)->partialBlock; ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status == READ_STATUS_INVALID) { // Reset in-flight state in case of whitelist MarkBlockAsReceived(pindex->GetBlockHash()); Misbehaving(pfrom, 100, "invalid compact block"); return; } else if (status == READ_STATUS_FAILED) { // Duplicate txindices, the block is now in-flight, so // just request it. 
std::vector vInv(1); vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash()); m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } BlockTransactionsRequest req; for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) { if (!partialBlock.IsTxAvailable(i)) { req.indices.push_back(i); } } if (req.indices.empty()) { // Dirty hack to jump to BLOCKTXN code (TODO: move // message handling into their own functions) BlockTransactions txn; txn.blockhash = cmpctblock.header.GetHash(); blockTxnMsg << txn; fProcessBLOCKTXN = true; } else { req.blockhash = pindex->GetBlockHash(); m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req)); } } else { // This block is either already in flight from a different // peer, or this peer has too many blocks outstanding to // download from. Optimistically try to reconstruct anyway // since we might be able to without any round trips. PartiallyDownloadedBlock tempBlock(config, &m_mempool); ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status != READ_STATUS_OK) { // TODO: don't ignore failures return; } std::vector dummy; status = tempBlock.FillBlock(*pblock, dummy); if (status == READ_STATUS_OK) { fBlockReconstructed = true; } } } else { if (fAlreadyInFlight) { // We requested this block, but its far into the future, so // our mempool will probably be useless - request the block // normally. std::vector vInv(1); vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash()); m_connman.PushMessage( &pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv)); return; } else { // If this was an announce-cmpctblock, we want the same // treatment as a header message. fRevertToHeaderProcessing = true; } } } // cs_main if (fProcessBLOCKTXN) { return ProcessMessage(config, pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, time_received, interruptMsgProc); } if (fRevertToHeaderProcessing) { // Headers received from HB compact block peers are permitted to be // relayed before full validation (see BIP 152), so we don't want to // disconnect the peer if the header turns out to be for an invalid // block. Note that if a peer tries to build on an invalid chain, // that will be detected and the peer will be banned. return ProcessHeadersMessage(config, pfrom, {cmpctblock.header}, /*via_compact_block=*/true); } if (fBlockReconstructed) { // If we got here, we were able to optimistically reconstruct a // block that is in flight from some other peer. { LOCK(cs_main); mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom.GetId(), false)); } bool fNewBlock = false; // Setting fForceProcessing to true means that we bypass some of // our anti-DoS protections in AcceptBlock, which filters // unrequested blocks that might be trying to waste our resources // (eg disk space). Because we only try to reconstruct blocks when // we're close to caught up (via the CanDirectFetch() requirement // above, combined with the behavior of not requesting blocks until // we have a chain with at least nMinimumChainWork), and we ignore // compact blocks with less work than our tip, it is safe to treat // reconstructed compact blocks as having been requested. m_chainman.ProcessNewBlock(config, pblock, /*fForceProcessing=*/true, &fNewBlock); if (fNewBlock) { pfrom.nLastBlockTime = GetTime(); } else { LOCK(cs_main); mapBlockSource.erase(pblock->GetHash()); } // hold cs_main for CBlockIndex::IsValid() LOCK(cs_main); if (pindex->IsValid(BlockValidity::TRANSACTIONS)) { // Clear download state for this block, which is in process from // some other peer. 
We do this after calling. ProcessNewBlock so // that a malleated cmpctblock announcement can't be used to // interfere with block relay. MarkBlockAsReceived(pblock->GetHash()); } } return; } if (msg_type == NetMsgType::BLOCKTXN) { // Ignore blocktxn received while importing if (fImporting || fReindex) { LogPrint(BCLog::NET, "Unexpected blocktxn message received from peer %d\n", pfrom.GetId()); return; } BlockTransactions resp; vRecv >> resp; std::shared_ptr pblock = std::make_shared(); bool fBlockRead = false; { LOCK(cs_main); std::map::iterator>>:: iterator it = mapBlocksInFlight.find(resp.blockhash); if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock || it->second.first != pfrom.GetId()) { LogPrint(BCLog::NET, "Peer %d sent us block transactions for block " "we weren't expecting\n", pfrom.GetId()); return; } PartiallyDownloadedBlock &partialBlock = *it->second.second->partialBlock; ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { // Reset in-flight state in case of whitelist. MarkBlockAsReceived(resp.blockhash); Misbehaving( pfrom, 100, "invalid compact block/non-matching block transactions"); return; } else if (status == READ_STATUS_FAILED) { // Might have collided, fall back to getdata now :( std::vector invs; invs.push_back(CInv(MSG_BLOCK, resp.blockhash)); m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs)); } else { // Block is either okay, or possibly we received // READ_STATUS_CHECKBLOCK_FAILED. // Note that CheckBlock can only fail for one of a few reasons: // 1. bad-proof-of-work (impossible here, because we've already // accepted the header) // 2. merkleroot doesn't match the transactions given (already // caught in FillBlock with READ_STATUS_FAILED, so // impossible here) // 3. the block is otherwise invalid (eg invalid coinbase, // block is too big, too many legacy sigops, etc). // So if CheckBlock failed, #3 is the only possibility. // Under BIP 152, we don't DoS-ban unless proof of work is // invalid (we don't require all the stateless checks to have // been run). This is handled below, so just treat this as // though the block was successfully read, and rely on the // handling in ProcessNewBlock to ensure the block index is // updated, etc. // it is now an empty pointer MarkBlockAsReceived(resp.blockhash); fBlockRead = true; // mapBlockSource is used for potentially punishing peers and // updating which peers send us compact blocks, so the race // between here and cs_main in ProcessNewBlock is fine. // BIP 152 permits peers to relay compact blocks after // validating the header only; we should not punish peers // if the block turns out to be invalid. mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom.GetId(), false)); } } // Don't hold cs_main when we call into ProcessNewBlock if (fBlockRead) { bool fNewBlock = false; // Since we requested this block (it was in mapBlocksInFlight), // force it to be processed, even if it would not be a candidate for // new tip (missing previous block, chain not long enough, etc) // This bypasses some anti-DoS logic in AcceptBlock (eg to prevent // disk-space attacks), but this should be safe due to the // protections in the compact block handler -- see related comment // in compact block optimistic reconstruction handling. 
m_chainman.ProcessNewBlock(config, pblock, /*fForceProcessing=*/true, &fNewBlock); if (fNewBlock) { pfrom.nLastBlockTime = GetTime(); } else { LOCK(cs_main); mapBlockSource.erase(pblock->GetHash()); } } return; } if (msg_type == NetMsgType::HEADERS) { // Ignore headers received while importing if (fImporting || fReindex) { LogPrint(BCLog::NET, "Unexpected headers message received from peer %d\n", pfrom.GetId()); return; } std::vector headers; // Bypass the normal CBlock deserialization, as we don't want to risk // deserializing 2000 full blocks. unsigned int nCount = ReadCompactSize(vRecv); if (nCount > MAX_HEADERS_RESULTS) { Misbehaving(pfrom, 20, strprintf("too-many-headers: headers message size = %u", nCount)); return; } headers.resize(nCount); for (unsigned int n = 0; n < nCount; n++) { vRecv >> headers[n]; // Ignore tx count; assume it is 0. ReadCompactSize(vRecv); } return ProcessHeadersMessage(config, pfrom, headers, /*via_compact_block=*/false); } if (msg_type == NetMsgType::BLOCK) { // Ignore block received while importing if (fImporting || fReindex) { LogPrint(BCLog::NET, "Unexpected block message received from peer %d\n", pfrom.GetId()); return; } std::shared_ptr pblock = std::make_shared(); vRecv >> *pblock; LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom.GetId()); // Process all blocks from whitelisted peers, even if not requested, // unless we're still syncing with the network. Such an unrequested // block may still be processed, subject to the conditions in // AcceptBlock(). bool forceProcessing = pfrom.HasPermission(PF_NOBAN) && !::ChainstateActive().IsInitialBlockDownload(); const BlockHash hash = pblock->GetHash(); { LOCK(cs_main); // Also always process if we requested the block explicitly, as we // may need it even though it is not a candidate for a new best tip. forceProcessing |= MarkBlockAsReceived(hash); // mapBlockSource is only used for punishing peers and setting // which peers send us compact blocks, so the race between here and // cs_main in ProcessNewBlock is fine. 
mapBlockSource.emplace(hash, std::make_pair(pfrom.GetId(), true)); } bool fNewBlock = false; m_chainman.ProcessNewBlock(config, pblock, forceProcessing, &fNewBlock); if (fNewBlock) { pfrom.nLastBlockTime = GetTime(); } else { LOCK(cs_main); mapBlockSource.erase(hash); } return; } if (msg_type == NetMsgType::AVAHELLO && g_avalanche && gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) { if (!pfrom.m_avalanche_state) { pfrom.m_avalanche_state = std::make_unique(); } CHashVerifier verifier(&vRecv); avalanche::Delegation &delegation = pfrom.m_avalanche_state->delegation; verifier >> delegation; avalanche::Proof proof; avalanche::DelegationState state; CPubKey pubkey; if (!delegation.verify(state, proof, pubkey)) { Misbehaving(pfrom, 100, "invalid-delegation"); return; } std::array sig; verifier >> sig; } // Ignore avalanche requests while importing if (msg_type == NetMsgType::AVAPOLL && !fImporting && !fReindex && g_avalanche && gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) { auto now = std::chrono::steady_clock::now(); int64_t cooldown = gArgs.GetArg("-avacooldown", AVALANCHE_DEFAULT_COOLDOWN); { LOCK(cs_main); auto &node_state = State(pfrom.GetId())->m_avalanche_state; if (now < node_state.last_poll + std::chrono::milliseconds(cooldown)) { Misbehaving(pfrom, 20, "avapool-cooldown"); } node_state.last_poll = now; } uint64_t round; Unserialize(vRecv, round); unsigned int nCount = ReadCompactSize(vRecv); if (nCount > AVALANCHE_MAX_ELEMENT_POLL) { Misbehaving( pfrom, 20, strprintf("too-many-ava-poll: poll message size = %u", nCount)); return; } std::vector votes; votes.reserve(nCount); LogPrint(BCLog::NET, "received avalanche poll from peer=%d\n", pfrom.GetId()); { LOCK(cs_main); for (unsigned int n = 0; n < nCount; n++) { CInv inv; vRecv >> inv; const auto insertVote = [&](uint32_t e) { votes.emplace_back(e, inv.hash); }; // Not a block. if (inv.type != MSG_BLOCK) { insertVote(-1); continue; } // We have a block. const CBlockIndex *pindex = LookupBlockIndex(BlockHash(inv.hash)); // Unknown block. if (!pindex) { insertVote(-1); continue; } // Invalid block if (pindex->nStatus.isInvalid()) { insertVote(1); continue; } // Parked block if (pindex->nStatus.isOnParkedChain()) { insertVote(2); continue; } const CBlockIndex *pindexTip = ::ChainActive().Tip(); const CBlockIndex *pindexFork = LastCommonAncestor(pindex, pindexTip); // Active block. if (pindex == pindexFork) { insertVote(0); continue; } // Fork block. if (pindexFork != pindexTip) { insertVote(3); continue; } // Missing block data. if (!pindex->nStatus.hasData()) { insertVote(-2); continue; } // This block is built on top of the tip, we have the data, it // is pending connection or rejection. insertVote(-3); } } // Send the query to the node. g_avalanche->sendResponse( &pfrom, avalanche::Response(round, cooldown, std::move(votes))); return; } // Ignore avalanche requests while importing if (msg_type == NetMsgType::AVARESPONSE && !fImporting && !fReindex && g_avalanche && gArgs.GetBoolArg("-enableavalanche", AVALANCHE_DEFAULT_ENABLED)) { // As long as QUIC is not implemented, we need to sign response and // verify response's signatures in order to avoid any manipulation of // messages at the transport level. 
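    // The response bytes are hashed as they are deserialized through the
    // verifier; the trailing Schnorr signature is then checked against that
    // hash using the public key registered for this node.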
CHashVerifier verifier(&vRecv); avalanche::Response response; verifier >> response; if (!g_avalanche->forNode(pfrom.GetId(), [&](const avalanche::Node &n) { std::array sig; vRecv >> sig; return n.pubkey.VerifySchnorr(verifier.GetHash(), sig); })) { Misbehaving(pfrom, 100, "invalid-ava-response-signature"); return; } std::vector updates; if (!g_avalanche->registerVotes(pfrom.GetId(), response, updates)) { return; } if (updates.size()) { for (avalanche::BlockUpdate &u : updates) { CBlockIndex *pindex = u.getBlockIndex(); switch (u.getStatus()) { case avalanche::BlockUpdate::Status::Invalid: case avalanche::BlockUpdate::Status::Rejected: { LogPrintf("Avalanche rejected %s, parking\n", pindex->GetBlockHash().GetHex()); BlockValidationState state; ::ChainstateActive().ParkBlock(config, state, pindex); if (!state.IsValid()) { LogPrintf("ERROR: Database error: %s\n", state.GetRejectReason()); return; } } break; case avalanche::BlockUpdate::Status::Accepted: case avalanche::BlockUpdate::Status::Finalized: { LogPrintf("Avalanche accepted %s\n", pindex->GetBlockHash().GetHex()); LOCK(cs_main); UnparkBlock(pindex); } break; } } BlockValidationState state; if (!ActivateBestChain(config, state)) { LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); } } return; } if (msg_type == NetMsgType::GETADDR) { // This asymmetric behavior for inbound and outbound connections was // introduced to prevent a fingerprinting attack: an attacker can send // specific fake addresses to users' AddrMan and later request them by // sending getaddr messages. Making nodes which are behind NAT and can // only make outgoing connections ignore the getaddr message mitigates // the attack. if (!pfrom.IsInboundConn()) { LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom.GetId()); return; } if (!pfrom.IsAddrRelayPeer()) { LogPrint(BCLog::NET, "Ignoring \"getaddr\" from block-relay-only connection. " "peer=%d\n", pfrom.GetId()); return; } // Only send one GetAddr response per connection to reduce resource // waste and discourage addr stamping of INV announcements. if (pfrom.fSentAddr) { LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom.GetId()); return; } pfrom.fSentAddr = true; pfrom.vAddrToSend.clear(); std::vector vAddr = m_connman.GetAddresses(); FastRandomContext insecure_rand; for (const CAddress &addr : vAddr) { bool banned_or_discouraged = m_banman && (m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr)); if (!banned_or_discouraged) { pfrom.PushAddress(addr, insecure_rand); } } return; } if (msg_type == NetMsgType::MEMPOOL) { if (!(pfrom.GetLocalServices() & NODE_BLOOM) && !pfrom.HasPermission(PF_MEMPOOL)) { if (!pfrom.HasPermission(PF_NOBAN)) { LogPrint(BCLog::NET, "mempool request with bloom filters disabled, " "disconnect peer=%d\n", pfrom.GetId()); pfrom.fDisconnect = true; } return; } if (m_connman.OutboundTargetReached(false) && !pfrom.HasPermission(PF_MEMPOOL)) { if (!pfrom.HasPermission(PF_NOBAN)) { LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, " "disconnect peer=%d\n", pfrom.GetId()); pfrom.fDisconnect = true; } return; } if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_tx_inventory); pfrom.m_tx_relay->fSendMempool = true; } return; } if (msg_type == NetMsgType::PING) { if (pfrom.GetCommonVersion() > BIP0031_VERSION) { uint64_t nonce = 0; vRecv >> nonce; // Echo the message back with the nonce. 
This allows for two useful // features: // // 1) A remote node can quickly check if the connection is // operational. // 2) Remote nodes can measure the latency of the network thread. If // this node is overloaded it won't respond to pings quickly and the // remote node can avoid sending us more work, like chain download // requests. // // The nonce stops the remote getting confused between different // pings: without it, if the remote node sends a ping once per // second and this node takes 5 seconds to respond to each, the 5th // ping the remote sends would appear to return very quickly. m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::PONG, nonce)); } return; } if (msg_type == NetMsgType::PONG) { const auto ping_end = time_received; uint64_t nonce = 0; size_t nAvail = vRecv.in_avail(); bool bPingFinished = false; std::string sProblem; if (nAvail >= sizeof(nonce)) { vRecv >> nonce; // Only process pong message if there is an outstanding ping (old // ping without nonce should never pong) if (pfrom.nPingNonceSent != 0) { if (nonce == pfrom.nPingNonceSent) { // Matching pong received, this ping is no longer // outstanding bPingFinished = true; const auto ping_time = ping_end - pfrom.m_ping_start.load(); if (ping_time.count() > 0) { // Successful ping time measurement, replace previous pfrom.nPingUsecTime = count_microseconds(ping_time); pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), count_microseconds(ping_time)); } else { // This should never happen sProblem = "Timing mishap"; } } else { // Nonce mismatches are normal when pings are overlapping sProblem = "Nonce mismatch"; if (nonce == 0) { // This is most likely a bug in another implementation // somewhere; cancel this ping bPingFinished = true; sProblem = "Nonce zero"; } } } else { sProblem = "Unsolicited pong without ping"; } } else { // This is most likely a bug in another implementation somewhere; // cancel this ping bPingFinished = true; sProblem = "Short payload"; } if (!(sProblem.empty())) { LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", pfrom.GetId(), sProblem, pfrom.nPingNonceSent, nonce, nAvail); } if (bPingFinished) { pfrom.nPingNonceSent = 0; } return; } if (msg_type == NetMsgType::FILTERLOAD) { CBloomFilter filter; vRecv >> filter; if (!filter.IsWithinSizeConstraints()) { // There is no excuse for sending a too-large filter Misbehaving(pfrom, 100, "too-large bloom filter"); } else if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_filter); pfrom.m_tx_relay->pfilter.reset(new CBloomFilter(filter)); pfrom.m_tx_relay->fRelayTxes = true; } return; } if (msg_type == NetMsgType::FILTERADD) { std::vector vData; vRecv >> vData; // Nodes must NEVER send a data item > 520 bytes (the max size for a // script data object, and thus, the maximum size any matched object can // have) in a filteradd message. bool bad = false; if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) { bad = true; } else if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_filter); if (pfrom.m_tx_relay->pfilter) { pfrom.m_tx_relay->pfilter->insert(vData); } else { bad = true; } } if (bad) { // The structure of this code doesn't really allow for a good error // code. We'll go generic. 
Misbehaving(pfrom, 100, "bad filteradd message"); } return; } if (msg_type == NetMsgType::FILTERCLEAR) { if (pfrom.m_tx_relay == nullptr) { return; } LOCK(pfrom.m_tx_relay->cs_filter); if (pfrom.GetLocalServices() & NODE_BLOOM) { pfrom.m_tx_relay->pfilter = nullptr; } pfrom.m_tx_relay->fRelayTxes = true; return; } if (msg_type == NetMsgType::FEEFILTER) { Amount newFeeFilter = Amount::zero(); vRecv >> newFeeFilter; if (MoneyRange(newFeeFilter)) { if (pfrom.m_tx_relay != nullptr) { LOCK(pfrom.m_tx_relay->cs_feeFilter); pfrom.m_tx_relay->minFeeFilter = newFeeFilter; } LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom.GetId()); } return; } if (msg_type == NetMsgType::GETCFILTERS) { ProcessGetCFilters(pfrom, vRecv, m_chainparams, m_connman); return; } if (msg_type == NetMsgType::GETCFHEADERS) { ProcessGetCFHeaders(pfrom, vRecv, m_chainparams, m_connman); return; } if (msg_type == NetMsgType::GETCFCHECKPT) { ProcessGetCFCheckPt(pfrom, vRecv, m_chainparams, m_connman); return; } if (msg_type == NetMsgType::NOTFOUND) { // Remove the NOTFOUND transactions from the peer LOCK(cs_main); CNodeState *state = State(pfrom.GetId()); std::vector vInv; vRecv >> vInv; if (vInv.size() <= MAX_PEER_TX_IN_FLIGHT + MAX_BLOCKS_IN_TRANSIT_PER_PEER) { for (CInv &inv : vInv) { if (inv.type == MSG_TX) { const TxId txid(inv.hash); // If we receive a NOTFOUND message for a txid we requested, // erase it from our data structures for this peer. auto in_flight_it = state->m_tx_download.m_tx_in_flight.find(txid); if (in_flight_it == state->m_tx_download.m_tx_in_flight.end()) { // Skip any further work if this is a spurious NOTFOUND // message. continue; } state->m_tx_download.m_tx_in_flight.erase(in_flight_it); state->m_tx_download.m_tx_announced.erase(txid); } } } return; } // Ignore unknown commands for extensibility LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId()); return; } bool PeerManager::MaybeDiscourageAndDisconnect(CNode &pnode) { const NodeId peer_id{pnode.GetId()}; PeerRef peer = GetPeerRef(peer_id); if (peer == nullptr) { return false; } { LOCK(peer->m_misbehavior_mutex); // There's nothing to do if the m_should_discourage flag isn't set if (!peer->m_should_discourage) { return false; } peer->m_should_discourage = false; } // peer.m_misbehavior_mutex if (pnode.HasPermission(PF_NOBAN)) { // We never disconnect or discourage peers for bad behavior if they have // the NOBAN permission flag LogPrintf("Warning: not punishing noban peer %d!\n", peer_id); return false; } if (pnode.IsManualConn()) { // We never disconnect or discourage manual peers for bad behavior LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id); return false; } if (pnode.addr.IsLocal()) { // We disconnect local peers for bad behavior but don't discourage // (since that would discourage all peers on the same local address) LogPrintf( "Warning: disconnecting but not discouraging local peer %d!\n", peer_id); pnode.fDisconnect = true; return true; } // Normal case: Disconnect the peer and discourage all nodes sharing the // address LogPrintf("Disconnecting and discouraging peer %d!\n", peer_id); if (m_banman) { m_banman->Discourage(pnode.addr); } m_connman.DisconnectNode(pnode.addr); return true; } bool PeerManager::ProcessMessages(const Config &config, CNode *pfrom, std::atomic &interruptMsgProc) { // // Message format // (4) message start // (12) command // (4) size // (4) checksum // (x) data // bool fMoreWork = false; 
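    // Only one queued message is processed per call; the returned fMoreWork
    // flag tells the caller whether this peer still has work pending so it
    // can be scheduled again.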
if (!pfrom->vRecvGetData.empty()) { ProcessGetData(config, *pfrom, m_connman, m_mempool, interruptMsgProc); } if (!pfrom->orphan_work_set.empty()) { LOCK2(cs_main, g_cs_orphans); ProcessOrphanTx(config, pfrom->orphan_work_set); } if (pfrom->fDisconnect) { return false; } // this maintains the order of responses and prevents vRecvGetData from // growing unbounded if (!pfrom->vRecvGetData.empty()) { return true; } if (!pfrom->orphan_work_set.empty()) { return true; } // Don't bother if send buffer is too full to respond anyway if (pfrom->fPauseSend) { return false; } std::list msgs; { LOCK(pfrom->cs_vProcessMsg); if (pfrom->vProcessMsg.empty()) { return false; } // Just take one message msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin()); pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size; pfrom->fPauseRecv = pfrom->nProcessQueueSize > m_connman.GetReceiveFloodSize(); fMoreWork = !pfrom->vProcessMsg.empty(); } CNetMessage &msg(msgs.front()); msg.SetVersion(pfrom->GetCommonVersion()); // Check network magic if (!msg.m_valid_netmagic) { LogPrint(BCLog::NET, "PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.m_command), pfrom->GetId()); // Make sure we discourage where that come from for some time. if (m_banman) { m_banman->Discourage(pfrom->addr); } m_connman.DisconnectNode(pfrom->addr); pfrom->fDisconnect = true; return false; } // Check header if (!msg.m_valid_header) { LogPrint(BCLog::NET, "PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(msg.m_command), pfrom->GetId()); return fMoreWork; } const std::string &msg_type = msg.m_command; // Message size unsigned int nMessageSize = msg.m_message_size; // Checksum CDataStream &vRecv = msg.m_recv; if (!msg.m_valid_checksum) { LogPrint(BCLog::NET, "%s(%s, %u bytes): CHECKSUM ERROR peer=%d\n", __func__, SanitizeString(msg_type), nMessageSize, pfrom->GetId()); if (m_banman) { m_banman->Discourage(pfrom->addr); } m_connman.DisconnectNode(pfrom->addr); return fMoreWork; } try { ProcessMessage(config, *pfrom, msg_type, vRecv, msg.m_time, interruptMsgProc); if (interruptMsgProc) { return false; } if (!pfrom->vRecvGetData.empty()) { fMoreWork = true; } } catch (const std::exception &e) { LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name()); } catch (...) { LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize); } return fMoreWork; } void PeerManager::ConsiderEviction(CNode &pto, int64_t time_in_seconds) { AssertLockHeld(cs_main); CNodeState &state = *State(pto.GetId()); const CNetMsgMaker msgMaker(pto.GetCommonVersion()); if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) { // This is an outbound peer subject to disconnection if they don't // announce a block with as much work as the current tip within // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if their // chain has more work than ours, we should sync to it, unless it's // invalid, in which case we should find that out and disconnect from // them elsewhere). 
if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork) { if (state.m_chain_sync.m_timeout != 0) { state.m_chain_sync.m_timeout = 0; state.m_chain_sync.m_work_header = nullptr; state.m_chain_sync.m_sent_getheaders = false; } } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) { // Our best block known by this peer is behind our tip, and we're // either noticing that for the first time, OR this peer was able to // catch up to some earlier point where we checked against our tip. // Either way, set a new timeout based on current tip. state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT; state.m_chain_sync.m_work_header = ::ChainActive().Tip(); state.m_chain_sync.m_sent_getheaders = false; } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) { // No evidence yet that our peer has synced to a chain with work // equal to that of our tip, when we first detected it was behind. // Send a single getheaders message to give the peer a chance to // update us. if (state.m_chain_sync.m_sent_getheaders) { // They've run out of time to catch up! LogPrintf( "Disconnecting outbound peer %d for old chain, best known " "block = %s\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : ""); pto.fDisconnect = true; } else { assert(state.m_chain_sync.m_work_header); LogPrint( BCLog::NET, "sending getheaders to outbound peer=%d to verify chain " "work (current best known block:%s, benchmark blockhash: " "%s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "", state.m_chain_sync.m_work_header->GetBlockHash() .ToString()); m_connman.PushMessage( &pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator( state.m_chain_sync.m_work_header->pprev), uint256())); state.m_chain_sync.m_sent_getheaders = true; // 2 minutes constexpr int64_t HEADERS_RESPONSE_TIME = 120; // Bump the timeout to allow a response, which could clear the // timeout (if the response shows the peer has synced), reset // the timeout (if the peer syncs to the required work but not // to our tip), or result in disconnect (if we advance to the // timeout and pindexBestKnownBlock has not sufficiently // progressed) state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME; } } } } void PeerManager::EvictExtraOutboundPeers(int64_t time_in_seconds) { // Check whether we have too many outbound peers int extra_peers = m_connman.GetExtraOutboundCount(); if (extra_peers <= 0) { return; } // If we have more outbound peers than we target, disconnect one. 
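    // Candidates for eviction exclude inbound peers, peers already marked for
    // disconnect, peers protected by the chain-sync logic, and block-relay-only
    // peers. The chosen peer is only actually dropped if it has been connected
    // for longer than MINIMUM_CONNECT_TIME and has no blocks in flight, so a
    // freshly connected or actively useful peer is never evicted here.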
// Pick the outbound peer that least recently announced us a new block, with // ties broken by choosing the more recent connection (higher node id) NodeId worst_peer = -1; int64_t oldest_block_announcement = std::numeric_limits::max(); m_connman.ForEachNode([&](CNode *pnode) { AssertLockHeld(cs_main); // Ignore non-outbound peers, or nodes marked for disconnect already if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) { return; } CNodeState *state = State(pnode->GetId()); if (state == nullptr) { // shouldn't be possible, but just in case return; } // Don't evict our protected peers if (state->m_chain_sync.m_protect) { return; } // Don't evict our block-relay-only peers. if (pnode->m_tx_relay == nullptr) { return; } if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) { worst_peer = pnode->GetId(); oldest_block_announcement = state->m_last_block_announcement; } }); if (worst_peer == -1) { return; } bool disconnected = m_connman.ForNode(worst_peer, [&](CNode *pnode) { AssertLockHeld(cs_main); // Only disconnect a peer that has been connected to us for some // reasonable fraction of our check-frequency, to give it time for new // information to have arrived. // Also don't disconnect any peer we're trying to download a block from. CNodeState &state = *State(pnode->GetId()); if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) { LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block " "announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement); pnode->fDisconnect = true; return true; } else { LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction " "(connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight); return false; } }); if (disconnected) { // If we disconnected an extra peer, that means we successfully // connected to at least one peer after the last time we detected a // stale tip. Don't try any more extra peers until we next detect a // stale tip, to limit the load we put on the network from these extra // connections. m_connman.SetTryNewOutboundPeer(false); } } void PeerManager::CheckForStaleTipAndEvictPeers( const Consensus::Params &consensusParams) { LOCK(cs_main); int64_t time_in_seconds = GetTime(); EvictExtraOutboundPeers(time_in_seconds); if (time_in_seconds <= m_stale_tip_check_time) { return; } // Check whether our tip is stale, and if so, allow using an extra outbound // peer. if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(consensusParams)) { LogPrintf("Potential stale tip detected, will try using extra outbound " "peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update); m_connman.SetTryNewOutboundPeer(true); } else if (m_connman.GetTryNewOutboundPeer()) { m_connman.SetTryNewOutboundPeer(false); } m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL; } namespace { class CompareInvMempoolOrder { CTxMemPool *mp; public: explicit CompareInvMempoolOrder(CTxMemPool *_mempool) { mp = _mempool; } bool operator()(std::set::iterator a, std::set::iterator b) { /** * As std::make_heap produces a max-heap, we want the entries with the * fewest ancestors/highest fee to sort later. 
*/ return mp->CompareDepthAndScore(*b, *a); } }; } // namespace bool PeerManager::SendMessages(const Config &config, CNode *pto, std::atomic &interruptMsgProc) { const Consensus::Params &consensusParams = config.GetChainParams().GetConsensus(); // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll // disconnect misbehaving peers even before the version handshake is // complete. if (MaybeDiscourageAndDisconnect(*pto)) { return true; } // Don't send anything until the version handshake is complete if (!pto->fSuccessfullyConnected || pto->fDisconnect) { return true; } // If we get here, the outgoing message serialization version is set and // can't change. const CNetMsgMaker msgMaker(pto->GetCommonVersion()); // // Message: ping // bool pingSend = false; if (pto->fPingQueued) { // RPC ping request by user pingSend = true; } if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime()) { // Ping automatically sent as a latency probe & keepalive. pingSend = true; } if (pingSend) { uint64_t nonce = 0; while (nonce == 0) { GetRandBytes((uint8_t *)&nonce, sizeof(nonce)); } pto->fPingQueued = false; pto->m_ping_start = GetTime(); if (pto->GetCommonVersion() > BIP0031_VERSION) { pto->nPingNonceSent = nonce; m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce)); } else { // Peer is too old to support ping command with nonce, pong will // never arrive. pto->nPingNonceSent = 0; m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING)); } } { LOCK(cs_main); CNodeState &state = *State(pto->GetId()); // Address refresh broadcast int64_t nNow = GetTimeMicros(); auto current_time = GetTime(); if (pto->IsAddrRelayPeer() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) { AdvertiseLocal(pto); pto->m_next_local_addr_send = PoissonNextSend( current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } // // Message: addr // if (pto->IsAddrRelayPeer() && pto->m_next_addr_send < current_time) { pto->m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL); std::vector vAddr; vAddr.reserve(pto->vAddrToSend.size()); assert(pto->m_addr_known); + + const char *msg_type; + int make_flags; + if (pto->m_wants_addrv2) { + msg_type = NetMsgType::ADDRV2; + make_flags = ADDRV2_FORMAT; + } else { + msg_type = NetMsgType::ADDR; + make_flags = 0; + } + for (const CAddress &addr : pto->vAddrToSend) { if (!pto->m_addr_known->contains(addr.GetKey())) { pto->m_addr_known->insert(addr.GetKey()); vAddr.push_back(addr); // receiver rejects addr messages larger than 1000 if (vAddr.size() >= 1000) { m_connman.PushMessage( - pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); + pto, msgMaker.Make(make_flags, msg_type, vAddr)); vAddr.clear(); } } } pto->vAddrToSend.clear(); if (!vAddr.empty()) { - m_connman.PushMessage(pto, - msgMaker.Make(NetMsgType::ADDR, vAddr)); + m_connman.PushMessage( + pto, msgMaker.Make(make_flags, msg_type, vAddr)); } // we only send the big addr message once if (pto->vAddrToSend.capacity() > 40) { pto->vAddrToSend.shrink_to_fit(); } } // Start block sync if (pindexBestHeader == nullptr) { pindexBestHeader = ::ChainActive().Tip(); } // Download if this is a nice peer, or we have no nice peers and this // one might do. bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're // close to today. 
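            // "Close to today" means our best header is within 24 hours of the
            // current adjusted time. The headers sync timeout set below scales
            // with how far behind we are: a fixed base plus a per-header
            // allowance for the number of headers we still expect, estimated
            // from (now - best header time) / nPowTargetSpacing.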
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { state.fSyncStarted = true; state.nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime()) / (consensusParams.nPowTargetSpacing); nSyncStarted++; const CBlockIndex *pindexStart = pindexBestHeader; /** * If possible, start at the block preceding the currently best * known header. This ensures that we always get a non-empty * list of headers back as long as the peer is up-to-date. With * a non-empty response, we can initialise the peer's known best * block. This wouldn't be possible if we requested starting at * pindexBestHeader and got back an empty response. */ if (pindexStart->pprev) { pindexStart = pindexStart->pprev; } LogPrint( BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight); m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexStart), uint256())); } } // // Try sending block announcements via headers // { // If we have less than MAX_BLOCKS_TO_ANNOUNCE in our list of block // hashes we're relaying, and our peer wants headers announcements, // then find the first header not yet known to our peer but would // connect, and send. If no header would connect, or if we have too // many blocks, or if the peer doesn't want headers, just add all to // the inv queue. LOCK(pto->cs_inventory); std::vector vHeaders; bool fRevertToInv = ((!state.fPreferHeaders && (!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) || pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE); // last header queued for delivery const CBlockIndex *pBestIndex = nullptr; // ensure pindexBestKnownBlock is up-to-date ProcessBlockAvailability(pto->GetId()); if (!fRevertToInv) { bool fFoundStartingHeader = false; // Try to find first header that our peer doesn't have, and then // send all headers past that one. If we come across an headers // that aren't on ::ChainActive(), give up. for (const BlockHash &hash : pto->vBlockHashesToAnnounce) { const CBlockIndex *pindex = LookupBlockIndex(hash); assert(pindex); if (::ChainActive()[pindex->nHeight] != pindex) { // Bail out if we reorged away from this block fRevertToInv = true; break; } if (pBestIndex != nullptr && pindex->pprev != pBestIndex) { // This means that the list of blocks to announce don't // connect to each other. This shouldn't really be // possible to hit during regular operation (because // reorgs should take us to a chain that has some block // not on the prior chain, which should be caught by the // prior check), but one way this could happen is by // using invalidateblock / reconsiderblock repeatedly on // the tip, causing it to be added multiple times to // vBlockHashesToAnnounce. Robustly deal with this rare // situation by reverting to an inv. fRevertToInv = true; break; } pBestIndex = pindex; if (fFoundStartingHeader) { // add this to the headers message vHeaders.push_back(pindex->GetBlockHeader()); } else if (PeerHasHeader(&state, pindex)) { // Keep looking for the first new block. continue; } else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) { // Peer doesn't have this header but they do have the // prior one. Start sending headers. 
fFoundStartingHeader = true; vHeaders.push_back(pindex->GetBlockHeader()); } else { // Peer doesn't have this header or the prior one -- // nothing will connect, so bail out. fRevertToInv = true; break; } } } if (!fRevertToInv && !vHeaders.empty()) { if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) { // We only send up to 1 block as header-and-ids, as // otherwise probably means we're doing an initial-ish-sync // or they're slow. LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__, vHeaders.front().GetHash().ToString(), pto->GetId()); int nSendFlags = 0; bool fGotBlockFromCache = false; { LOCK(cs_most_recent_block); if (most_recent_block_hash == pBestIndex->GetBlockHash()) { CBlockHeaderAndShortTxIDs cmpctblock( *most_recent_block); m_connman.PushMessage( pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); fGotBlockFromCache = true; } } if (!fGotBlockFromCache) { CBlock block; bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams); assert(ret); CBlockHeaderAndShortTxIDs cmpctblock(block); m_connman.PushMessage( pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock)); } state.pindexBestHeaderSent = pBestIndex; } else if (state.fPreferHeaders) { if (vHeaders.size() > 1) { LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__, vHeaders.size(), vHeaders.front().GetHash().ToString(), vHeaders.back().GetHash().ToString(), pto->GetId()); } else { LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__, vHeaders.front().GetHash().ToString(), pto->GetId()); } m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders)); state.pindexBestHeaderSent = pBestIndex; } else { fRevertToInv = true; } } if (fRevertToInv) { // If falling back to using an inv, just try to inv the tip. The // last entry in vBlockHashesToAnnounce was our tip at some // point in the past. if (!pto->vBlockHashesToAnnounce.empty()) { const BlockHash &hashToAnnounce = pto->vBlockHashesToAnnounce.back(); const CBlockIndex *pindex = LookupBlockIndex(hashToAnnounce); assert(pindex); // Warn if we're announcing a block that is not on the main // chain. This should be very rare and could be optimized // out. Just log for now. if (::ChainActive()[pindex->nHeight] != pindex) { LogPrint( BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n", hashToAnnounce.ToString(), ::ChainActive().Tip()->GetBlockHash().ToString()); } // If the peer's chain has this block, don't inv it back. 
if (!PeerHasHeader(&state, pindex)) { pto->PushBlockInventory(hashToAnnounce); LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__, pto->GetId(), hashToAnnounce.ToString()); } } } pto->vBlockHashesToAnnounce.clear(); } // // Message: inventory // std::vector vInv; { LOCK(pto->cs_inventory); vInv.reserve(std::max(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX_PER_MB * config.GetMaxBlockSize() / 1000000)); // Add blocks for (const BlockHash &hash : pto->vInventoryBlockToSend) { vInv.push_back(CInv(MSG_BLOCK, hash)); if (vInv.size() == MAX_INV_SZ) { m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } } pto->vInventoryBlockToSend.clear(); if (pto->m_tx_relay != nullptr) { LOCK(pto->m_tx_relay->cs_tx_inventory); // Check whether periodic sends should happen bool fSendTrickle = pto->HasPermission(PF_NOBAN); if (pto->m_tx_relay->nNextInvSend < current_time) { fSendTrickle = true; if (pto->IsInboundConn()) { pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{ m_connman.PoissonNextSendInbound( nNow, INVENTORY_BROADCAST_INTERVAL)}; } else { // Skip delay for outbound peers, as there is less // privacy concern for them. pto->m_tx_relay->nNextInvSend = current_time; } } // Time to send but the peer has requested we not relay // transactions. if (fSendTrickle) { LOCK(pto->m_tx_relay->cs_filter); if (!pto->m_tx_relay->fRelayTxes) { pto->m_tx_relay->setInventoryTxToSend.clear(); } } // Respond to BIP35 mempool requests if (fSendTrickle && pto->m_tx_relay->fSendMempool) { auto vtxinfo = m_mempool.infoAll(); pto->m_tx_relay->fSendMempool = false; CFeeRate filterrate; { LOCK(pto->m_tx_relay->cs_feeFilter); filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter); } LOCK(pto->m_tx_relay->cs_filter); for (const auto &txinfo : vtxinfo) { const TxId &txid = txinfo.tx->GetId(); CInv inv(MSG_TX, txid); pto->m_tx_relay->setInventoryTxToSend.erase(txid); // Don't send transactions that peers will not put into // their mempool if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { continue; } if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate( *txinfo.tx)) { continue; } pto->m_tx_relay->filterInventoryKnown.insert(txid); vInv.push_back(inv); if (vInv.size() == MAX_INV_SZ) { m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } } pto->m_tx_relay->m_last_mempool_req = GetTime(); } // Determine transactions to relay if (fSendTrickle) { // Produce a vector with all candidates for sending std::vector::iterator> vInvTx; vInvTx.reserve( pto->m_tx_relay->setInventoryTxToSend.size()); for (std::set::iterator it = pto->m_tx_relay->setInventoryTxToSend.begin(); it != pto->m_tx_relay->setInventoryTxToSend.end(); it++) { vInvTx.push_back(it); } CFeeRate filterrate; { LOCK(pto->m_tx_relay->cs_feeFilter); filterrate = CFeeRate(pto->m_tx_relay->minFeeFilter); } // Topologically and fee-rate sort the inventory we send for // privacy and priority reasons. A heap is used so that not // all items need sorting if only a few are being sent. CompareInvMempoolOrder compareInvMempoolOrder(&m_mempool); std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); // No reason to drain out at many times the network's // capacity, especially since we have many peers and some // will draw much shorter delays. 
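                // Each trickle therefore relays at most
                // INVENTORY_BROADCAST_MAX_PER_MB * (max block size in MB)
                // transactions; anything not sent this round stays in
                // setInventoryTxToSend for a later interval.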
unsigned int nRelayedTransactions = 0; LOCK(pto->m_tx_relay->cs_filter); while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX_PER_MB * config.GetMaxBlockSize() / 1000000) { // Fetch the top element from the heap std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder); std::set::iterator it = vInvTx.back(); vInvTx.pop_back(); const TxId txid = *it; // Remove it from the to-be-sent set pto->m_tx_relay->setInventoryTxToSend.erase(it); // Check if not in the filter already if (pto->m_tx_relay->filterInventoryKnown.contains( txid)) { continue; } // Not in the mempool anymore? don't bother sending it. auto txinfo = m_mempool.info(txid); if (!txinfo.tx) { continue; } // Peer told you to not send transactions at that // feerate? Don't bother sending it. if (txinfo.fee < filterrate.GetFee(txinfo.vsize)) { continue; } if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate( *txinfo.tx)) { continue; } // Send vInv.push_back(CInv(MSG_TX, txid)); nRelayedTransactions++; { // Expire old relay messages while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow) { mapRelay.erase(vRelayExpiration.front().second); vRelayExpiration.pop_front(); } auto ret = mapRelay.insert( std::make_pair(txid, std::move(txinfo.tx))); if (ret.second) { vRelayExpiration.push_back(std::make_pair( nNow + std::chrono::microseconds{ RELAY_TX_CACHE_TIME} .count(), ret.first)); } } if (vInv.size() == MAX_INV_SZ) { m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::INV, vInv)); vInv.clear(); } pto->m_tx_relay->filterInventoryKnown.insert(txid); } } } } if (!vInv.empty()) { m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv)); } // Detect whether we're stalling current_time = GetTime(); // nNow is the current system time (GetTimeMicros is not mockable) and // should be replaced by the mockable current_time eventually nNow = GetTimeMicros(); if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) { // Stalling only triggers when the block download window cannot // move. During normal steady state, the download window should be // much larger than the to-be-downloaded set of blocks, so // disconnection should only happen during initial block download. LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId()); pto->fDisconnect = true; return true; } // In case there is a block that has been in flight from this peer for 2 // + 0.5 * N times the block interval (with N the number of peers from // which we're downloading validated blocks), disconnect due to timeout. // We compensate for other peers to prevent killing off peers due to our // own downstream link being saturated. We only count validated // in-flight blocks so peers can't advertise non-existing block hashes // to unreasonably increase our timeout. 
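        // Concretely, the oldest in-flight block from this peer times out once
        // we have been downloading it for longer than
        // nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE +
        //                      BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * N),
        // where N is the number of other peers we are downloading validated
        // blocks from.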
if (state.vBlocksInFlight.size() > 0) { QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0); if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { LogPrintf("Timeout downloading block %s from peer=%d, " "disconnecting\n", queuedBlock.hash.ToString(), pto->GetId()); pto->fDisconnect = true; return true; } } // Check for headers sync timeouts if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits::max()) { // Detect whether this is a stalling initial-headers-sync peer if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24 * 60 * 60) { if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) { // Disconnect a (non-whitelisted) peer if it is our only // sync peer, and we have others we could be using instead. // Note: If all our peers are inbound, then we won't // disconnect our sync peer for stalling; we have bigger // problems if we can't get any outbound peers. if (!pto->HasPermission(PF_NOBAN)) { LogPrintf("Timeout downloading headers from peer=%d, " "disconnecting\n", pto->GetId()); pto->fDisconnect = true; return true; } else { LogPrintf( "Timeout downloading headers from whitelisted " "peer=%d, not disconnecting\n", pto->GetId()); // Reset the headers sync state so that we have a chance // to try downloading from a different peer. Note: this // will also result in at least one more getheaders // message to be sent to this peer (eventually). state.fSyncStarted = false; nSyncStarted--; state.nHeadersSyncTimeout = 0; } } } else { // After we've caught up once, reset the timeout so we can't // trigger disconnect later. state.nHeadersSyncTimeout = std::numeric_limits::max(); } } // Check that outbound peers have reasonable chains GetTime() is used by // this anti-DoS logic so we can test this using mocktime. ConsiderEviction(*pto, GetTime()); // // Message: getdata (blocks) // std::vector vGetData; if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector vToDownload; NodeId staller = -1; FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams); for (const CBlockIndex *pindex : vToDownload) { vGetData.push_back(CInv(MSG_BLOCK, pindex->GetBlockHash())); MarkBlockAsInFlight(config, m_mempool, pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->GetId()); } if (state.nBlocksInFlight == 0 && staller != -1) { if (State(staller)->nStallingSince == 0) { State(staller)->nStallingSince = nNow; LogPrint(BCLog::NET, "Stall started peer=%d\n", staller); } } } // // Message: getdata (transactions) // // For robustness, expire old requests after a long timeout, so that we // can resume downloading transactions from a peer even if they were // unresponsive in the past. Eventually we should consider disconnecting // peers, but this is conservative. 
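        // The expiry sweep below runs on a per-peer timer randomized between
        // 0.5x and 1.5x TX_EXPIRY_INTERVAL, so the in-flight maps of all peers
        // are not walked in the same pass.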
if (state.m_tx_download.m_check_expiry_timer <= current_time) { for (auto it = state.m_tx_download.m_tx_in_flight.begin(); it != state.m_tx_download.m_tx_in_flight.end();) { if (it->second <= current_time - TX_EXPIRY_INTERVAL) { LogPrint(BCLog::NET, "timeout of inflight tx %s from peer=%d\n", it->first.ToString(), pto->GetId()); state.m_tx_download.m_tx_announced.erase(it->first); state.m_tx_download.m_tx_in_flight.erase(it++); } else { ++it; } } // On average, we do this check every TX_EXPIRY_INTERVAL. Randomize // so that we're not doing this for all peers at the same time. state.m_tx_download.m_check_expiry_timer = current_time + TX_EXPIRY_INTERVAL / 2 + GetRandMicros(TX_EXPIRY_INTERVAL); } auto &tx_process_time = state.m_tx_download.m_tx_process_time; while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) { const TxId txid = tx_process_time.begin()->second; // Erase this entry from tx_process_time (it may be added back for // processing at a later time, see below) tx_process_time.erase(tx_process_time.begin()); CInv inv(MSG_TX, txid); if (!AlreadyHave(inv, m_mempool)) { // If this transaction was last requested more than 1 minute // ago, then request. const auto last_request_time = GetTxRequestTime(txid); if (last_request_time <= current_time - GETDATA_TX_INTERVAL) { LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId()); vGetData.push_back(inv); if (vGetData.size() >= MAX_GETDATA_SZ) { m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); vGetData.clear(); } UpdateTxRequestTime(txid, current_time); state.m_tx_download.m_tx_in_flight.emplace(txid, current_time); } else { // This transaction is in flight from someone else; queue // up processing to happen after the download times out // (with a slight delay for inbound peers, to prefer // requests to outbound peers). const auto next_process_time = CalculateTxGetDataTime( txid, current_time, !state.fPreferredDownload); tx_process_time.emplace(next_process_time, txid); } } else { // We have already seen this transaction, no need to download. 
state.m_tx_download.m_tx_announced.erase(txid); state.m_tx_download.m_tx_in_flight.erase(txid); } } if (!vGetData.empty()) { m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); } // // Message: feefilter // // We don't want white listed peers to filter txs to us if we have // -whitelistforcerelay if (pto->m_tx_relay != nullptr && pto->GetCommonVersion() >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) && !pto->HasPermission(PF_FORCERELAY)) { Amount currentFilter = m_mempool .GetMinFee( gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000) .GetFeePerK(); int64_t timeNow = GetTimeMicros(); if (timeNow > pto->m_tx_relay->nextSendTimeFeeFilter) { static CFeeRate default_feerate = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE_PER_KB); static FeeFilterRounder filterRounder(default_feerate); Amount filterToSend = filterRounder.round(currentFilter); filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK()); if (filterToSend != pto->m_tx_relay->lastSentFeeFilter) { m_connman.PushMessage( pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend)); pto->m_tx_relay->lastSentFeeFilter = filterToSend; } pto->m_tx_relay->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL); } // If the fee filter has changed substantially and it's still more // than MAX_FEEFILTER_CHANGE_DELAY until scheduled broadcast, then // move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->m_tx_relay->nextSendTimeFeeFilter && (currentFilter < 3 * pto->m_tx_relay->lastSentFeeFilter / 4 || currentFilter > 4 * pto->m_tx_relay->lastSentFeeFilter / 3)) { pto->m_tx_relay->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000; } } } // release cs_main return true; } class CNetProcessingCleanup { public: CNetProcessingCleanup() {} ~CNetProcessingCleanup() { // orphan transactions mapOrphanTransactions.clear(); mapOrphanTransactionsByPrev.clear(); } }; static CNetProcessingCleanup instance_of_cnetprocessingcleanup; diff --git a/src/netaddress.cpp b/src/netaddress.cpp index 48ad0d14f..0837952f0 100644 --- a/src/netaddress.cpp +++ b/src/netaddress.cpp @@ -1,1173 +1,1197 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include constexpr size_t CNetAddr::V1_SERIALIZATION_SIZE; constexpr size_t CNetAddr::MAX_ADDRV2_SIZE; CNetAddr::BIP155Network CNetAddr::GetBIP155Network() const { switch (m_net) { case NET_IPV4: return BIP155Network::IPV4; case NET_IPV6: return BIP155Network::IPV6; case NET_ONION: switch (m_addr.size()) { case ADDR_TORV2_SIZE: return BIP155Network::TORV2; case ADDR_TORV3_SIZE: return BIP155Network::TORV3; default: assert(false); } case NET_I2P: return BIP155Network::I2P; case NET_CJDNS: return BIP155Network::CJDNS; case NET_INTERNAL: // should have been handled before calling this function case NET_UNROUTABLE: // m_net is never and should not be set to NET_UNROUTABLE case NET_MAX: // m_net is never and should not be set to NET_MAX assert(false); } // no default case, so the compiler can warn about missing cases assert(false); } bool CNetAddr::SetNetFromBIP155Network(uint8_t possible_bip155_net, size_t address_size) { switch (possible_bip155_net) { case BIP155Network::IPV4: if (address_size == ADDR_IPV4_SIZE) { m_net = NET_IPV4; return true; } throw std::ios_base::failure( strprintf("BIP155 IPv4 address with length %u (should be %u)", address_size, ADDR_IPV4_SIZE)); case BIP155Network::IPV6: if (address_size == ADDR_IPV6_SIZE) { m_net = NET_IPV6; return true; } throw std::ios_base::failure( strprintf("BIP155 IPv6 address with length %u (should be %u)", address_size, ADDR_IPV6_SIZE)); case BIP155Network::TORV2: if (address_size == ADDR_TORV2_SIZE) { m_net = NET_ONION; return true; } throw std::ios_base::failure( strprintf("BIP155 TORv2 address with length %u (should be %u)", address_size, ADDR_TORV2_SIZE)); case BIP155Network::TORV3: if (address_size == ADDR_TORV3_SIZE) { m_net = NET_ONION; return true; } throw std::ios_base::failure( strprintf("BIP155 TORv3 address with length %u (should be %u)", address_size, ADDR_TORV3_SIZE)); case BIP155Network::I2P: if (address_size == ADDR_I2P_SIZE) { m_net = NET_I2P; return true; } throw std::ios_base::failure( strprintf("BIP155 I2P address with length %u (should be %u)", address_size, ADDR_I2P_SIZE)); case BIP155Network::CJDNS: if (address_size == ADDR_CJDNS_SIZE) { m_net = NET_CJDNS; return true; } throw std::ios_base::failure( strprintf("BIP155 CJDNS address with length %u (should be %u)", address_size, ADDR_CJDNS_SIZE)); } // Don't throw on addresses with unknown network ids (maybe from the // future). Instead silently drop them and have the unserialization code // consume subsequent ones which may be known to us. return false; } /** * Construct an unspecified IPv6 network address (::/128). * * @note This address is considered invalid by CNetAddr::IsValid() */ CNetAddr::CNetAddr() {} void CNetAddr::SetIP(const CNetAddr &ipIn) { // Size check. 
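    // Every supported network has a fixed raw address length (4 bytes for
    // IPv4, 16 for IPv6 and CJDNS, 10 or 32 for Tor v2/v3, 32 for I2P, 10 for
    // "internal" addresses), so assert it before copying to keep m_addr
    // consistent with m_net.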
switch (ipIn.m_net) { case NET_IPV4: assert(ipIn.m_addr.size() == ADDR_IPV4_SIZE); break; case NET_IPV6: assert(ipIn.m_addr.size() == ADDR_IPV6_SIZE); break; case NET_ONION: assert(ipIn.m_addr.size() == ADDR_TORV2_SIZE || ipIn.m_addr.size() == ADDR_TORV3_SIZE); break; case NET_I2P: assert(ipIn.m_addr.size() == ADDR_I2P_SIZE); break; case NET_CJDNS: assert(ipIn.m_addr.size() == ADDR_CJDNS_SIZE); break; case NET_INTERNAL: assert(ipIn.m_addr.size() == ADDR_INTERNAL_SIZE); break; case NET_UNROUTABLE: case NET_MAX: assert(false); } // no default case, so the compiler can warn about missing cases m_net = ipIn.m_net; m_addr = ipIn.m_addr; } void CNetAddr::SetLegacyIPv6(Span ipv6) { assert(ipv6.size() == ADDR_IPV6_SIZE); size_t skip{0}; if (HasPrefix(ipv6, IPV4_IN_IPV6_PREFIX)) { // IPv4-in-IPv6 m_net = NET_IPV4; skip = sizeof(IPV4_IN_IPV6_PREFIX); } else if (HasPrefix(ipv6, TORV2_IN_IPV6_PREFIX)) { // TORv2-in-IPv6 m_net = NET_ONION; skip = sizeof(TORV2_IN_IPV6_PREFIX); } else if (HasPrefix(ipv6, INTERNAL_IN_IPV6_PREFIX)) { // Internal-in-IPv6 m_net = NET_INTERNAL; skip = sizeof(INTERNAL_IN_IPV6_PREFIX); } else { // IPv6 m_net = NET_IPV6; } m_addr.assign(ipv6.begin() + skip, ipv6.end()); } /** * Create an "internal" address that represents a name or FQDN. CAddrMan uses * these fake addresses to keep track of which DNS seeds were used. * @returns Whether or not the operation was successful. * @see NET_INTERNAL, INTERNAL_IN_IPV6_PREFIX, CNetAddr::IsInternal(), * CNetAddr::IsRFC4193() */ bool CNetAddr::SetInternal(const std::string &name) { if (name.empty()) { return false; } m_net = NET_INTERNAL; uint8_t hash[32] = {}; CSHA256().Write((const uint8_t *)name.data(), name.size()).Finalize(hash); m_addr.assign(hash, hash + ADDR_INTERNAL_SIZE); return true; } namespace torv3 { // https://gitweb.torproject.org/torspec.git/tree/rend-spec-v3.txt#n2135 static constexpr size_t CHECKSUM_LEN = 2; static const uint8_t VERSION[] = {3}; static constexpr size_t TOTAL_LEN = ADDR_TORV3_SIZE + CHECKSUM_LEN + sizeof(VERSION); static void Checksum(Span addr_pubkey, uint8_t (&checksum)[CHECKSUM_LEN]) { // TORv3 CHECKSUM = H(".onion checksum" | PUBKEY | VERSION)[:2] static const uint8_t prefix[] = ".onion checksum"; static constexpr size_t prefix_len = 15; SHA3_256 hasher; hasher.Write(MakeSpan(prefix).first(prefix_len)); hasher.Write(addr_pubkey); hasher.Write(VERSION); uint8_t checksum_full[SHA3_256::OUTPUT_SIZE]; hasher.Finalize(checksum_full); memcpy(checksum, checksum_full, sizeof(checksum)); } }; // namespace torv3 /** * Parse a TOR address and set this object to it. * * @returns Whether or not the operation was successful. 
* * @see CNetAddr::IsTor() */ bool CNetAddr::SetSpecial(const std::string &str) { static const char *suffix{".onion"}; static constexpr size_t suffix_len{6}; if (!ValidAsCString(str) || str.size() <= suffix_len || str.substr(str.size() - suffix_len) != suffix) { return false; } bool invalid; const auto &input = DecodeBase32(str.substr(0, str.size() - suffix_len).c_str(), &invalid); if (invalid) { return false; } switch (input.size()) { case ADDR_TORV2_SIZE: m_net = NET_ONION; m_addr.assign(input.begin(), input.end()); return true; case torv3::TOTAL_LEN: { Span input_pubkey{input.data(), ADDR_TORV3_SIZE}; Span input_checksum{input.data() + ADDR_TORV3_SIZE, torv3::CHECKSUM_LEN}; Span input_version{input.data() + ADDR_TORV3_SIZE + torv3::CHECKSUM_LEN, sizeof(torv3::VERSION)}; uint8_t calculated_checksum[torv3::CHECKSUM_LEN]; torv3::Checksum(input_pubkey, calculated_checksum); if (input_checksum != calculated_checksum || input_version != torv3::VERSION) { return false; } m_net = NET_ONION; m_addr.assign(input_pubkey.begin(), input_pubkey.end()); return true; } } return false; } CNetAddr::CNetAddr(const struct in_addr &ipv4Addr) { m_net = NET_IPV4; const uint8_t *ptr = reinterpret_cast(&ipv4Addr); m_addr.assign(ptr, ptr + ADDR_IPV4_SIZE); } CNetAddr::CNetAddr(const struct in6_addr &ipv6Addr, const uint32_t scope) { SetLegacyIPv6(Span( reinterpret_cast(&ipv6Addr), sizeof(ipv6Addr))); scopeId = scope; } bool CNetAddr::IsBindAny() const { if (!IsIPv4() && !IsIPv6()) { return false; } return std::all_of(m_addr.begin(), m_addr.end(), [](uint8_t b) { return b == 0; }); } bool CNetAddr::IsIPv4() const { return m_net == NET_IPV4; } bool CNetAddr::IsIPv6() const { return m_net == NET_IPV6; } bool CNetAddr::IsRFC1918() const { return IsIPv4() && (m_addr[0] == 10 || (m_addr[0] == 192 && m_addr[1] == 168) || (m_addr[0] == 172 && m_addr[1] >= 16 && m_addr[1] <= 31)); } bool CNetAddr::IsRFC2544() const { return IsIPv4() && m_addr[0] == 198 && (m_addr[1] == 18 || m_addr[1] == 19); } bool CNetAddr::IsRFC3927() const { return IsIPv4() && HasPrefix(m_addr, std::array{{169, 254}}); } bool CNetAddr::IsRFC6598() const { return IsIPv4() && m_addr[0] == 100 && m_addr[1] >= 64 && m_addr[1] <= 127; } bool CNetAddr::IsRFC5737() const { return IsIPv4() && (HasPrefix(m_addr, std::array{{192, 0, 2}}) || HasPrefix(m_addr, std::array{{198, 51, 100}}) || HasPrefix(m_addr, std::array{{203, 0, 113}})); } bool CNetAddr::IsRFC3849() const { return IsIPv6() && HasPrefix(m_addr, std::array{{0x20, 0x01, 0x0D, 0xB8}}); } bool CNetAddr::IsRFC3964() const { return IsIPv6() && HasPrefix(m_addr, std::array{{0x20, 0x02}}); } bool CNetAddr::IsRFC6052() const { return IsIPv6() && HasPrefix(m_addr, std::array{{0x00, 0x64, 0xFF, 0x9B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}); } bool CNetAddr::IsRFC4380() const { return IsIPv6() && HasPrefix(m_addr, std::array{{0x20, 0x01, 0x00, 0x00}}); } bool CNetAddr::IsRFC4862() const { return IsIPv6() && HasPrefix(m_addr, std::array{{0xFE, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}}); } bool CNetAddr::IsRFC4193() const { return IsIPv6() && (m_addr[0] & 0xFE) == 0xFC; } bool CNetAddr::IsRFC6145() const { return IsIPv6() && HasPrefix(m_addr, std::array{{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x00, 0x00}}); } bool CNetAddr::IsRFC4843() const { return IsIPv6() && HasPrefix(m_addr, std::array{{0x20, 0x01, 0x00}}) && (m_addr[3] & 0xF0) == 0x10; } bool CNetAddr::IsRFC7343() const { return IsIPv6() && HasPrefix(m_addr, std::array{{0x20, 0x01, 0x00}}) && (m_addr[3] & 0xF0) == 0x20; } 
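// Hurricane Electric (he.net) tunnel broker allocation, 2001:470::/32.
// Example: 2001:470:1f07::2 begins with the bytes {0x20, 0x01, 0x04, 0x70}
// and so is classified as HE net, while 2001:db8::1 matches the RFC3849
// documentation prefix instead.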
bool CNetAddr::IsHeNet() const { return IsIPv6() && HasPrefix(m_addr, std::array{{0x20, 0x01, 0x04, 0x70}}); } /** * Check whether this object represents a TOR address. * * @see CNetAddr::SetSpecial(const std::string &) */ bool CNetAddr::IsTor() const { return m_net == NET_ONION; } /** * Check whether this object represents an I2P address. */ bool CNetAddr::IsI2P() const { return m_net == NET_I2P; } /** * Check whether this object represents a CJDNS address. */ bool CNetAddr::IsCJDNS() const { return m_net == NET_CJDNS; } bool CNetAddr::IsLocal() const { // IPv4 loopback (127.0.0.0/8 or 0.0.0.0/8) if (IsIPv4() && (m_addr[0] == 127 || m_addr[0] == 0)) { return true; } // IPv6 loopback (::1/128) static const uint8_t pchLocal[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}; if (IsIPv6() && memcmp(m_addr.data(), pchLocal, sizeof(pchLocal)) == 0) { return true; } return false; } /** * @returns Whether or not this network address is a valid address that @a could * be used to refer to an actual host. * * @note A valid address may or may not be publicly routable on the global * internet. As in, the set of valid addresses is a superset of the set of * publicly routable addresses. * * @see CNetAddr::IsRoutable() */ bool CNetAddr::IsValid() const { // Cleanup 3-byte shifted addresses caused by garbage in size field of addr // messages from versions before 0.2.9 checksum. // Two consecutive addr messages look like this: // header20 vectorlen3 addr26 addr26 addr26 header20 vectorlen3 addr26 // addr26 addr26... so if the first length field is garbled, it reads the // second batch of addr misaligned by 3 bytes. if (IsIPv6() && memcmp(m_addr.data(), IPV4_IN_IPV6_PREFIX.data() + 3, sizeof(IPV4_IN_IPV6_PREFIX) - 3) == 0) { return false; } // unspecified IPv6 address (::/128) uint8_t ipNone6[16] = {}; if (IsIPv6() && memcmp(m_addr.data(), ipNone6, sizeof(ipNone6)) == 0) { return false; } // documentation IPv6 address if (IsRFC3849()) { return false; } if (IsInternal()) { return false; } if (IsIPv4()) { const uint32_t addr = ReadBE32(m_addr.data()); if (addr == INADDR_ANY || addr == INADDR_NONE) { return false; } } return true; } /** * @returns Whether or not this network address is publicly routable on the * global internet. * * @note A routable address is always valid. As in, the set of routable * addresses is a subset of the set of valid addresses. * * @see CNetAddr::IsValid() */ bool CNetAddr::IsRoutable() const { return IsValid() && !(IsRFC1918() || IsRFC2544() || IsRFC3927() || IsRFC4862() || IsRFC6598() || IsRFC5737() || (IsRFC4193() && !IsTor()) || IsRFC4843() || IsRFC7343() || IsLocal() || IsInternal()); } /** * @returns Whether or not this is a dummy address that represents a name. 
* * @see CNetAddr::SetInternal(const std::string &) */ bool CNetAddr::IsInternal() const { return m_net == NET_INTERNAL; } +bool CNetAddr::IsAddrV1Compatible() const { + switch (m_net) { + case NET_IPV4: + case NET_IPV6: + case NET_INTERNAL: + return true; + case NET_ONION: + return m_addr.size() == ADDR_TORV2_SIZE; + case NET_I2P: + case NET_CJDNS: + return false; + case NET_UNROUTABLE: + // m_net is never and should not be set to NET_UNROUTABLE + case NET_MAX: + // m_net is never and should not be set to NET_MAX + assert(false); + } // no default case, so the compiler can warn about missing cases + + assert(false); +} + enum Network CNetAddr::GetNetwork() const { if (IsInternal()) { return NET_INTERNAL; } if (!IsRoutable()) { return NET_UNROUTABLE; } return m_net; } static std::string IPv6ToString(Span a) { assert(a.size() == ADDR_IPV6_SIZE); // clang-format off return strprintf("%x:%x:%x:%x:%x:%x:%x:%x", ReadBE16(&a[0]), ReadBE16(&a[2]), ReadBE16(&a[4]), ReadBE16(&a[6]), ReadBE16(&a[8]), ReadBE16(&a[10]), ReadBE16(&a[12]), ReadBE16(&a[14])); // clang-format on } std::string CNetAddr::ToStringIP() const { switch (m_net) { case NET_IPV4: case NET_IPV6: { CService serv(*this, 0); struct sockaddr_storage sockaddr; socklen_t socklen = sizeof(sockaddr); if (serv.GetSockAddr((struct sockaddr *)&sockaddr, &socklen)) { char name[1025] = ""; if (!getnameinfo((const struct sockaddr *)&sockaddr, socklen, name, sizeof(name), nullptr, 0, NI_NUMERICHOST)) { return std::string(name); } } if (m_net == NET_IPV4) { return strprintf("%u.%u.%u.%u", m_addr[0], m_addr[1], m_addr[2], m_addr[3]); } return IPv6ToString(m_addr); } case NET_ONION: switch (m_addr.size()) { case ADDR_TORV2_SIZE: return EncodeBase32(m_addr) + ".onion"; case ADDR_TORV3_SIZE: { uint8_t checksum[torv3::CHECKSUM_LEN]; torv3::Checksum(m_addr, checksum); // TORv3 onion_address = base32(PUBKEY | CHECKSUM | VERSION) // + ".onion" prevector address{m_addr.begin(), m_addr.end()}; address.insert(address.end(), checksum, checksum + torv3::CHECKSUM_LEN); address.insert(address.end(), torv3::VERSION, torv3::VERSION + sizeof(torv3::VERSION)); return EncodeBase32(address) + ".onion"; } default: assert(false); } case NET_I2P: return EncodeBase32(m_addr, false /* don't pad with = */) + ".b32.i2p"; case NET_CJDNS: return IPv6ToString(m_addr); case NET_INTERNAL: return EncodeBase32(m_addr) + ".internal"; case NET_UNROUTABLE: // m_net is never and should not be set to NET_UNROUTABLE case NET_MAX: // m_net is never and should not be set to NET_MAX assert(false); } // no default case, so the compiler can warn about missing cases assert(false); } std::string CNetAddr::ToString() const { return ToStringIP(); } bool operator==(const CNetAddr &a, const CNetAddr &b) { return a.m_net == b.m_net && a.m_addr == b.m_addr; } bool operator<(const CNetAddr &a, const CNetAddr &b) { return std::tie(a.m_net, a.m_addr) < std::tie(b.m_net, b.m_addr); } /** * Try to get our IPv4 address. * * @param[out] pipv4Addr The in_addr struct to which to copy. * * @returns Whether or not the operation was successful, in particular, whether * or not our address was an IPv4 address. * * @see CNetAddr::IsIPv4() */ bool CNetAddr::GetInAddr(struct in_addr *pipv4Addr) const { if (!IsIPv4()) { return false; } assert(sizeof(*pipv4Addr) == m_addr.size()); memcpy(pipv4Addr, m_addr.data(), m_addr.size()); return true; } /** * Try to get our IPv6 address. * * @param[out] pipv6Addr The in6_addr struct to which to copy. 
* * @returns Whether or not the operation was successful, in particular, whether * or not our address was an IPv6 address. * * @see CNetAddr::IsIPv6() */ bool CNetAddr::GetIn6Addr(struct in6_addr *pipv6Addr) const { if (!IsIPv6()) { return false; } assert(sizeof(*pipv6Addr) == m_addr.size()); memcpy(pipv6Addr, m_addr.data(), m_addr.size()); return true; } bool CNetAddr::HasLinkedIPv4() const { return IsRoutable() && (IsIPv4() || IsRFC6145() || IsRFC6052() || IsRFC3964() || IsRFC4380()); } uint32_t CNetAddr::GetLinkedIPv4() const { if (IsIPv4()) { return ReadBE32(m_addr.data()); } else if (IsRFC6052() || IsRFC6145()) { // mapped IPv4, SIIT translated IPv4: the IPv4 address is the last 4 // bytes of the address return ReadBE32(MakeSpan(m_addr).last(ADDR_IPV4_SIZE).data()); } else if (IsRFC3964()) { // 6to4 tunneled IPv4: the IPv4 address is in bytes 2-6 return ReadBE32(MakeSpan(m_addr).subspan(2, ADDR_IPV4_SIZE).data()); } else if (IsRFC4380()) { // Teredo tunneled IPv4: the IPv4 address is in the last 4 bytes of the // address, but bitflipped return ~ReadBE32(MakeSpan(m_addr).last(ADDR_IPV4_SIZE).data()); } assert(false); } uint32_t CNetAddr::GetNetClass() const { // Make sure that if we return NET_IPV6, then IsIPv6() is true. The callers // expect that. // Check for "internal" first because such addresses are also !IsRoutable() // and we don't want to return NET_UNROUTABLE in that case. if (IsInternal()) { return NET_INTERNAL; } if (!IsRoutable()) { return NET_UNROUTABLE; } if (HasLinkedIPv4()) { return NET_IPV4; } return m_net; } uint32_t CNetAddr::GetMappedAS(const std::vector &asmap) const { uint32_t net_class = GetNetClass(); if (asmap.size() == 0 || (net_class != NET_IPV4 && net_class != NET_IPV6)) { return 0; // Indicates not found, safe because AS0 is reserved per // RFC7607. } std::vector ip_bits(128); if (HasLinkedIPv4()) { // For lookup, treat as if it was just an IPv4 address // (IPV4_IN_IPV6_PREFIX + IPv4 bits) for (int8_t byte_i = 0; byte_i < 12; ++byte_i) { for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) { ip_bits[byte_i * 8 + bit_i] = (IPV4_IN_IPV6_PREFIX[byte_i] >> (7 - bit_i)) & 1; } } uint32_t ipv4 = GetLinkedIPv4(); for (int i = 0; i < 32; ++i) { ip_bits[96 + i] = (ipv4 >> (31 - i)) & 1; } } else { // Use all 128 bits of the IPv6 address otherwise assert(IsIPv6()); for (int8_t byte_i = 0; byte_i < 16; ++byte_i) { uint8_t cur_byte = m_addr[byte_i]; for (uint8_t bit_i = 0; bit_i < 8; ++bit_i) { ip_bits[byte_i * 8 + bit_i] = (cur_byte >> (7 - bit_i)) & 1; } } } uint32_t mapped_as = Interpret(asmap, ip_bits); return mapped_as; } /** * Get the canonical identifier of our network group * * The groups are assigned in a way where it should be costly for an attacker to * obtain addresses with many different group identifiers, even if it is cheap * to obtain addresses with the same identifier. * * @note No two connections will be attempted to addresses with the same network * group. */ std::vector CNetAddr::GetGroup(const std::vector &asmap) const { std::vector vchRet; uint32_t net_class = GetNetClass(); // If non-empty asmap is supplied and the address is IPv4/IPv6, // return ASN to be used for bucketing. uint32_t asn = GetMappedAS(asmap); if (asn != 0) { // Either asmap was empty, or address has non-asmappable net // class (e.g. TOR). 
vchRet.push_back(NET_IPV6); // IPv4 and IPv6 with same ASN should be in // the same bucket for (int i = 0; i < 4; i++) { vchRet.push_back((asn >> (8 * i)) & 0xFF); } return vchRet; } vchRet.push_back(net_class); int nBits{0}; if (IsLocal()) { // all local addresses belong to the same group } else if (IsInternal()) { // all internal-usage addresses get their own group nBits = ADDR_INTERNAL_SIZE * 8; } else if (!IsRoutable()) { // all other unroutable addresses belong to the same group } else if (HasLinkedIPv4()) { // IPv4 addresses (and mapped IPv4 addresses) use /16 groups uint32_t ipv4 = GetLinkedIPv4(); vchRet.push_back((ipv4 >> 24) & 0xFF); vchRet.push_back((ipv4 >> 16) & 0xFF); return vchRet; } else if (IsTor() || IsI2P() || IsCJDNS()) { nBits = 4; } else if (IsHeNet()) { // for he.net, use /36 groups nBits = 36; } else { // for the rest of the IPv6 network, use /32 groups nBits = 32; } // Push our address onto vchRet. const size_t num_bytes = nBits / 8; vchRet.insert(vchRet.end(), m_addr.begin(), m_addr.begin() + num_bytes); nBits %= 8; // ...for the last byte, push nBits and for the rest of the byte push 1's if (nBits > 0) { assert(num_bytes < m_addr.size()); vchRet.push_back(m_addr[num_bytes] | ((1 << (8 - nBits)) - 1)); } return vchRet; } std::vector CNetAddr::GetAddrBytes() const { - uint8_t serialized[V1_SERIALIZATION_SIZE]; - SerializeV1Array(serialized); - return {std::begin(serialized), std::end(serialized)}; + if (IsAddrV1Compatible()) { + uint8_t serialized[V1_SERIALIZATION_SIZE]; + SerializeV1Array(serialized); + return {std::begin(serialized), std::end(serialized)}; + } + return std::vector(m_addr.begin(), m_addr.end()); } uint64_t CNetAddr::GetHash() const { uint256 hash = Hash(m_addr); uint64_t nRet; memcpy(&nRet, &hash, sizeof(nRet)); return nRet; } // private extensions to enum Network, only returned by GetExtNetwork, and only // used in GetReachabilityFrom static const int NET_UNKNOWN = NET_MAX + 0; static const int NET_TEREDO = NET_MAX + 1; static int GetExtNetwork(const CNetAddr *addr) { if (addr == nullptr) { return NET_UNKNOWN; } if (addr->IsRFC4380()) { return NET_TEREDO; } return addr->GetNetwork(); } /** Calculates a metric for how reachable (*this) is from a given partner */ int CNetAddr::GetReachabilityFrom(const CNetAddr *paddrPartner) const { enum Reachability { REACH_UNREACHABLE, REACH_DEFAULT, REACH_TEREDO, REACH_IPV6_WEAK, REACH_IPV4, REACH_IPV6_STRONG, REACH_PRIVATE }; if (!IsRoutable() || IsInternal()) { return REACH_UNREACHABLE; } int ourNet = GetExtNetwork(this); int theirNet = GetExtNetwork(paddrPartner); bool fTunnel = IsRFC3964() || IsRFC6052() || IsRFC6145(); switch (theirNet) { case NET_IPV4: switch (ourNet) { default: return REACH_DEFAULT; case NET_IPV4: return REACH_IPV4; } case NET_IPV6: switch (ourNet) { default: return REACH_DEFAULT; case NET_TEREDO: return REACH_TEREDO; case NET_IPV4: return REACH_IPV4; // only prefer giving our IPv6 address if it's not tunnelled case NET_IPV6: return fTunnel ? 
REACH_IPV6_WEAK : REACH_IPV6_STRONG; } case NET_ONION: switch (ourNet) { default: return REACH_DEFAULT; // Tor users can connect to IPv4 as well case NET_IPV4: return REACH_IPV4; case NET_ONION: return REACH_PRIVATE; } case NET_TEREDO: switch (ourNet) { default: return REACH_DEFAULT; case NET_TEREDO: return REACH_TEREDO; case NET_IPV6: return REACH_IPV6_WEAK; case NET_IPV4: return REACH_IPV4; } case NET_UNKNOWN: case NET_UNROUTABLE: default: switch (ourNet) { default: return REACH_DEFAULT; case NET_TEREDO: return REACH_TEREDO; case NET_IPV6: return REACH_IPV6_WEAK; case NET_IPV4: return REACH_IPV4; // either from Tor, or don't care about our address case NET_ONION: return REACH_PRIVATE; } } } CService::CService() : port(0) {} CService::CService(const CNetAddr &cip, unsigned short portIn) : CNetAddr(cip), port(portIn) {} CService::CService(const struct in_addr &ipv4Addr, unsigned short portIn) : CNetAddr(ipv4Addr), port(portIn) {} CService::CService(const struct in6_addr &ipv6Addr, unsigned short portIn) : CNetAddr(ipv6Addr), port(portIn) {} CService::CService(const struct sockaddr_in &addr) : CNetAddr(addr.sin_addr), port(ntohs(addr.sin_port)) { assert(addr.sin_family == AF_INET); } CService::CService(const struct sockaddr_in6 &addr) : CNetAddr(addr.sin6_addr, addr.sin6_scope_id), port(ntohs(addr.sin6_port)) { assert(addr.sin6_family == AF_INET6); } bool CService::SetSockAddr(const struct sockaddr *paddr) { switch (paddr->sa_family) { case AF_INET: *this = CService(*reinterpret_cast(paddr)); return true; case AF_INET6: *this = CService(*reinterpret_cast(paddr)); return true; default: return false; } } unsigned short CService::GetPort() const { return port; } bool operator==(const CService &a, const CService &b) { return static_cast(a) == static_cast(b) && a.port == b.port; } bool operator<(const CService &a, const CService &b) { return static_cast(a) < static_cast(b) || (static_cast(a) == static_cast(b) && a.port < b.port); } /** * Obtain the IPv4/6 socket address this represents. * * @param[out] paddr The obtained socket address. * @param[in,out] addrlen The size, in bytes, of the address structure pointed * to by paddr. The value that's pointed to by this * parameter might change after calling this function if * the size of the corresponding address structure * changed. * * @returns Whether or not the operation was successful. */ bool CService::GetSockAddr(struct sockaddr *paddr, socklen_t *addrlen) const { if (IsIPv4()) { if (*addrlen < (socklen_t)sizeof(struct sockaddr_in)) { return false; } *addrlen = sizeof(struct sockaddr_in); struct sockaddr_in *paddrin = reinterpret_cast(paddr); memset(paddrin, 0, *addrlen); if (!GetInAddr(&paddrin->sin_addr)) { return false; } paddrin->sin_family = AF_INET; paddrin->sin_port = htons(port); return true; } if (IsIPv6()) { if (*addrlen < (socklen_t)sizeof(struct sockaddr_in6)) { return false; } *addrlen = sizeof(struct sockaddr_in6); struct sockaddr_in6 *paddrin6 = reinterpret_cast(paddr); memset(paddrin6, 0, *addrlen); if (!GetIn6Addr(&paddrin6->sin6_addr)) { return false; } paddrin6->sin6_scope_id = scopeId; paddrin6->sin6_family = AF_INET6; paddrin6->sin6_port = htons(port); return true; } return false; } /** * @returns An identifier unique to this service's address and port number. 
*/ std::vector CService::GetKey() const { auto key = GetAddrBytes(); // most significant byte of our port key.push_back(port / 0x100); // least significant byte of our port key.push_back(port & 0x0FF); return key; } std::string CService::ToStringPort() const { return strprintf("%u", port); } std::string CService::ToStringIPPort() const { if (IsIPv4() || IsTor() || IsI2P() || IsInternal()) { return ToStringIP() + ":" + ToStringPort(); } else { return "[" + ToStringIP() + "]:" + ToStringPort(); } } std::string CService::ToString() const { return ToStringIPPort(); } CSubNet::CSubNet() : valid(false) { memset(netmask, 0, sizeof(netmask)); } CSubNet::CSubNet(const CNetAddr &addr, uint8_t mask) : CSubNet() { valid = (addr.IsIPv4() && mask <= ADDR_IPV4_SIZE * 8) || (addr.IsIPv6() && mask <= ADDR_IPV6_SIZE * 8); if (!valid) { return; } assert(mask <= sizeof(netmask) * 8); network = addr; uint8_t n = mask; for (size_t i = 0; i < network.m_addr.size(); ++i) { const uint8_t bits = n < 8 ? n : 8; // Set first bits. netmask[i] = (uint8_t)((uint8_t)0xFF << (8 - bits)); // Normalize network according to netmask. network.m_addr[i] &= netmask[i]; n -= bits; } } /** * @returns The number of 1-bits in the prefix of the specified subnet mask. If * the specified subnet mask is not a valid one, -1. */ static inline int NetmaskBits(uint8_t x) { switch (x) { case 0x00: return 0; case 0x80: return 1; case 0xc0: return 2; case 0xe0: return 3; case 0xf0: return 4; case 0xf8: return 5; case 0xfc: return 6; case 0xfe: return 7; case 0xff: return 8; default: return -1; } } CSubNet::CSubNet(const CNetAddr &addr, const CNetAddr &mask) : CSubNet() { valid = (addr.IsIPv4() || addr.IsIPv6()) && addr.m_net == mask.m_net; if (!valid) { return; } // Check if `mask` contains 1-bits after 0-bits (which is an invalid // netmask). bool zeros_found = false; for (auto b : mask.m_addr) { const int num_bits = NetmaskBits(b); if (num_bits == -1 || (zeros_found && num_bits != 0)) { valid = false; return; } if (num_bits < 8) { zeros_found = true; } } assert(mask.m_addr.size() <= sizeof(netmask)); memcpy(netmask, mask.m_addr.data(), mask.m_addr.size()); network = addr; // Normalize network according to netmask for (size_t x = 0; x < network.m_addr.size(); ++x) { network.m_addr[x] &= netmask[x]; } } CSubNet::CSubNet(const CNetAddr &addr) : CSubNet() { valid = addr.IsIPv4() || addr.IsIPv6(); if (!valid) { return; } assert(addr.m_addr.size() <= sizeof(netmask)); memset(netmask, 0xFF, addr.m_addr.size()); network = addr; } /** * @returns True if this subnet is valid, the specified address is valid, and * the specified address belongs in this subnet. 
*/ bool CSubNet::Match(const CNetAddr &addr) const { if (!valid || !addr.IsValid() || network.m_net != addr.m_net) { return false; } assert(network.m_addr.size() == addr.m_addr.size()); for (size_t x = 0; x < addr.m_addr.size(); ++x) { if ((addr.m_addr[x] & netmask[x]) != network.m_addr[x]) { return false; } } return true; } std::string CSubNet::ToString() const { assert(network.m_addr.size() <= sizeof(netmask)); uint8_t cidr = 0; for (size_t i = 0; i < network.m_addr.size(); ++i) { if (netmask[i] == 0x00) { break; } cidr += NetmaskBits(netmask[i]); } return network.ToString() + strprintf("/%u", cidr); } bool CSubNet::IsValid() const { return valid; } bool CSubNet::SanityCheck() const { if (!(network.IsIPv4() || network.IsIPv6())) { return false; } for (size_t x = 0; x < network.m_addr.size(); ++x) { if (network.m_addr[x] & ~netmask[x]) { return false; } } return true; } bool operator==(const CSubNet &a, const CSubNet &b) { return a.valid == b.valid && a.network == b.network && !memcmp(a.netmask, b.netmask, 16); } bool operator<(const CSubNet &a, const CSubNet &b) { return (a.network < b.network || (a.network == b.network && memcmp(a.netmask, b.netmask, 16) < 0)); } bool SanityCheckASMap(const std::vector &asmap) { // For IP address lookups, the input is 128 bits return SanityCheckASMap(asmap, 128); } diff --git a/src/netaddress.h b/src/netaddress.h index a8da1a76e..be34c40f2 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -1,532 +1,539 @@ // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef BITCOIN_NETADDRESS_H #define BITCOIN_NETADDRESS_H #if defined(HAVE_CONFIG_H) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include /** * A flag that is ORed into the protocol version to designate that addresses * should be serialized in (unserialized from) v2 format (BIP155). * Make sure that this does not collide with any of the values in `version.h`. */ static const int ADDRV2_FORMAT = 0x20000000; /** * A network type. * @note An address may belong to more than one network, for example `10.0.0.1` * belongs to both `NET_UNROUTABLE` and `NET_IPV4`. * Keep these sequential starting from 0 and `NET_MAX` as the last entry. * We have loops like `for (int i = 0; i < NET_MAX; i++)` that expect to iterate * over all enum values and also `GetExtNetwork()` "extends" this enum by * introducing standalone constants starting from `NET_MAX`. */ enum Network { /// Addresses from these networks are not publicly routable on the global /// Internet. NET_UNROUTABLE = 0, /// IPv4 NET_IPV4, /// IPv6 NET_IPV6, /// TOR (v2 or v3) NET_ONION, /// I2P NET_I2P, /// CJDNS NET_CJDNS, /// A set of addresses that represent the hash of a string or FQDN. We use /// them in CAddrMan to keep track of which DNS seeds were used. NET_INTERNAL, /// Dummy value to indicate the number of NET_* constants. NET_MAX, }; /// Prefix of an IPv6 address when it contains an embedded IPv4 address. /// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155). static const std::array IPV4_IN_IPV6_PREFIX{ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF}}; /// Prefix of an IPv6 address when it contains an embedded TORv2 address. /// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155). 
/// Such dummy IPv6 addresses are guaranteed to not be publicly routable as they /// fall under RFC4193's fc00::/7 subnet allocated to unique-local addresses. static const std::array TORV2_IN_IPV6_PREFIX{ {0xFD, 0x87, 0xD8, 0x7E, 0xEB, 0x43}}; /// Prefix of an IPv6 address when it contains an embedded "internal" address. /// Used when (un)serializing addresses in ADDRv1 format (pre-BIP155). /// The prefix comes from 0xFD + SHA256("bitcoin")[0:5]. /// Such dummy IPv6 addresses are guaranteed to not be publicly routable as they /// fall under RFC4193's fc00::/7 subnet allocated to unique-local addresses. static const std::array INTERNAL_IN_IPV6_PREFIX{ // 0xFD + sha256("bitcoin")[0:5]. {0xFD, 0x6B, 0x88, 0xC0, 0x87, 0x24}}; /// Size of IPv4 address (in bytes). static constexpr size_t ADDR_IPV4_SIZE = 4; /// Size of IPv6 address (in bytes). static constexpr size_t ADDR_IPV6_SIZE = 16; /// Size of TORv2 address (in bytes). static constexpr size_t ADDR_TORV2_SIZE = 10; /// Size of TORv3 address (in bytes). This is the length of just the address /// as used in BIP155, without the checksum and the version byte. static constexpr size_t ADDR_TORV3_SIZE = 32; /// Size of I2P address (in bytes). static constexpr size_t ADDR_I2P_SIZE = 32; /// Size of CJDNS address (in bytes). static constexpr size_t ADDR_CJDNS_SIZE = 16; /// Size of "internal" (NET_INTERNAL) address (in bytes). static constexpr size_t ADDR_INTERNAL_SIZE = 10; /** * Network address. */ class CNetAddr { protected: /** * Raw representation of the network address. * In network byte order (big endian) for IPv4 and IPv6. */ prevector m_addr{ADDR_IPV6_SIZE, 0x0}; /** * Network to which this address belongs. */ Network m_net{NET_IPV6}; // for scoped/link-local ipv6 addresses uint32_t scopeId{0}; public: CNetAddr(); explicit CNetAddr(const struct in_addr &ipv4Addr); void SetIP(const CNetAddr &ip); /** * Set from a legacy IPv6 address. * Legacy IPv6 address may be a normal IPv6 address, or another address * (e.g. IPv4) disguised as IPv6. This encoding is used in the legacy * `addr` encoding. 
*/ void SetLegacyIPv6(Span ipv6); bool SetInternal(const std::string &name); // for Tor addresses bool SetSpecial(const std::string &strName); // INADDR_ANY equivalent bool IsBindAny() const; // IPv4 mapped address (::FFFF:0:0/96, 0.0.0.0/0) bool IsIPv4() const; // IPv6 address (not mapped IPv4, not Tor) bool IsIPv6() const; // IPv4 private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) bool IsRFC1918() const; // IPv4 inter-network communications (198.18.0.0/15) bool IsRFC2544() const; // IPv4 ISP-level NAT (100.64.0.0/10) bool IsRFC6598() const; // IPv4 documentation addresses (192.0.2.0/24, 198.51.100.0/24, // 203.0.113.0/24) bool IsRFC5737() const; // IPv6 documentation address (2001:0DB8::/32) bool IsRFC3849() const; // IPv4 autoconfig (169.254.0.0/16) bool IsRFC3927() const; // IPv6 6to4 tunnelling (2002::/16) bool IsRFC3964() const; // IPv6 unique local (FC00::/7) bool IsRFC4193() const; // IPv6 Teredo tunnelling (2001::/32) bool IsRFC4380() const; // IPv6 ORCHID (deprecated) (2001:10::/28) bool IsRFC4843() const; // IPv6 ORCHIDv2 (2001:20::/28) bool IsRFC7343() const; // IPv6 autoconfig (FE80::/64) bool IsRFC4862() const; // IPv6 well-known prefix for IPv4-embedded address (64:FF9B::/96) bool IsRFC6052() const; // IPv6 IPv4-translated address (::FFFF:0:0:0/96) (actually defined in // RFC2765) bool IsRFC6145() const; // IPv6 Hurricane Electric - https://he.net (2001:0470::/36) bool IsHeNet() const; bool IsTor() const; bool IsI2P() const; bool IsCJDNS() const; bool IsLocal() const; bool IsRoutable() const; bool IsInternal() const; bool IsValid() const; + + /** + * Check if the current object can be serialized in pre-ADDRv2/BIP155 + * format. + */ + bool IsAddrV1Compatible() const; + enum Network GetNetwork() const; std::string ToString() const; std::string ToStringIP() const; uint64_t GetHash() const; bool GetInAddr(struct in_addr *pipv4Addr) const; uint32_t GetNetClass() const; //! For IPv4, mapped IPv4, SIIT translated IPv4, Teredo, 6to4 tunneled //! addresses, return the relevant IPv4 address as a uint32. uint32_t GetLinkedIPv4() const; //! Whether this address has a linked IPv4 address (see GetLinkedIPv4()). bool HasLinkedIPv4() const; // The AS on the BGP path to the node we use to diversify // peers in AddrMan bucketing based on the AS infrastructure. // The ip->AS mapping depends on how asmap is constructed. uint32_t GetMappedAS(const std::vector &asmap) const; std::vector GetGroup(const std::vector &asmap) const; std::vector GetAddrBytes() const; int GetReachabilityFrom(const CNetAddr *paddrPartner = nullptr) const; explicit CNetAddr(const struct in6_addr &pipv6Addr, const uint32_t scope = 0); bool GetIn6Addr(struct in6_addr *pipv6Addr) const; friend bool operator==(const CNetAddr &a, const CNetAddr &b); friend bool operator!=(const CNetAddr &a, const CNetAddr &b) { return !(a == b); } friend bool operator<(const CNetAddr &a, const CNetAddr &b); /** * Serialize to a stream. */ template void Serialize(Stream &s) const { if (s.GetVersion() & ADDRV2_FORMAT) { SerializeV2Stream(s); } else { SerializeV1Stream(s); } } /** * Unserialize from a stream. */ template void Unserialize(Stream &s) { if (s.GetVersion() & ADDRV2_FORMAT) { UnserializeV2Stream(s); } else { UnserializeV1Stream(s); } } friend class CSubNet; private: /** * BIP155 network ids recognized by this software. */ enum BIP155Network : uint8_t { IPV4 = 1, IPV6 = 2, TORV2 = 3, TORV3 = 4, I2P = 5, CJDNS = 6, }; /** * Size of CNetAddr when serialized as ADDRv1 (pre-BIP155) (in bytes). 
*/ static constexpr size_t V1_SERIALIZATION_SIZE = ADDR_IPV6_SIZE; /** * Maximum size of an address as defined in BIP155 (in bytes). * This is only the size of the address, not the entire CNetAddr object * when serialized. */ static constexpr size_t MAX_ADDRV2_SIZE = 512; /** * Get the BIP155 network id of this address. * Must not be called for IsInternal() objects. * @returns BIP155 network id */ BIP155Network GetBIP155Network() const; /** * Set `m_net` from the provided BIP155 network id and size after * validation. * @retval true the network was recognized, is valid and `m_net` was set * @retval false not recognised (from future?) and should be silently * ignored * @throws std::ios_base::failure if the network is one of the BIP155 * founding networks recognized by this software (id 1..6) with wrong * address size. */ bool SetNetFromBIP155Network(uint8_t possible_bip155_net, size_t address_size); /** * Serialize in pre-ADDRv2/BIP155 format to an array. */ void SerializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE]) const { size_t prefix_size; switch (m_net) { case NET_IPV6: assert(m_addr.size() == sizeof(arr)); memcpy(arr, m_addr.data(), m_addr.size()); return; case NET_IPV4: prefix_size = sizeof(IPV4_IN_IPV6_PREFIX); assert(prefix_size + m_addr.size() == sizeof(arr)); memcpy(arr, IPV4_IN_IPV6_PREFIX.data(), prefix_size); memcpy(arr + prefix_size, m_addr.data(), m_addr.size()); return; case NET_ONION: if (m_addr.size() == ADDR_TORV3_SIZE) { break; } prefix_size = sizeof(TORV2_IN_IPV6_PREFIX); assert(prefix_size + m_addr.size() == sizeof(arr)); memcpy(arr, TORV2_IN_IPV6_PREFIX.data(), prefix_size); memcpy(arr + prefix_size, m_addr.data(), m_addr.size()); return; case NET_INTERNAL: prefix_size = sizeof(INTERNAL_IN_IPV6_PREFIX); assert(prefix_size + m_addr.size() == sizeof(arr)); memcpy(arr, INTERNAL_IN_IPV6_PREFIX.data(), prefix_size); memcpy(arr + prefix_size, m_addr.data(), m_addr.size()); return; case NET_I2P: break; case NET_CJDNS: break; case NET_UNROUTABLE: case NET_MAX: assert(false); } // no default case, so the compiler can warn about missing cases // Serialize TORv3, I2P and CJDNS as all-zeros. memset(arr, 0x0, V1_SERIALIZATION_SIZE); } /** * Serialize in pre-ADDRv2/BIP155 format to a stream. */ template void SerializeV1Stream(Stream &s) const { uint8_t serialized[V1_SERIALIZATION_SIZE]; SerializeV1Array(serialized); s << serialized; } /** * Serialize as ADDRv2 / BIP155. */ template void SerializeV2Stream(Stream &s) const { if (IsInternal()) { // Serialize NET_INTERNAL as embedded in IPv6. We need to // serialize such addresses from addrman. s << static_cast(BIP155Network::IPV6); s << COMPACTSIZE(ADDR_IPV6_SIZE); SerializeV1Stream(s); return; } s << static_cast(GetBIP155Network()); s << m_addr; } /** * Unserialize from a pre-ADDRv2/BIP155 format from an array. */ void UnserializeV1Array(uint8_t (&arr)[V1_SERIALIZATION_SIZE]) { // Use SetLegacyIPv6() so that m_net is set correctly. For example // ::FFFF:0102:0304 should be set as m_net=NET_IPV4 (1.2.3.4). SetLegacyIPv6(arr); } /** * Unserialize from a pre-ADDRv2/BIP155 format from a stream. */ template void UnserializeV1Stream(Stream &s) { uint8_t serialized[V1_SERIALIZATION_SIZE]; s >> serialized; UnserializeV1Array(serialized); } /** * Unserialize from a ADDRv2 / BIP155 format. 
*/ template void UnserializeV2Stream(Stream &s) { uint8_t bip155_net; s >> bip155_net; size_t address_size; s >> COMPACTSIZE(address_size); if (address_size > MAX_ADDRV2_SIZE) { throw std::ios_base::failure(strprintf( "Address too long: %u > %u", address_size, MAX_ADDRV2_SIZE)); } scopeId = 0; if (SetNetFromBIP155Network(bip155_net, address_size)) { m_addr.resize(address_size); s >> MakeSpan(m_addr); if (m_net != NET_IPV6) { return; } // Do some special checks on IPv6 addresses. // Recognize NET_INTERNAL embedded in IPv6, such addresses are not // gossiped but could be coming from addrman, when unserializing // from disk. if (HasPrefix(m_addr, INTERNAL_IN_IPV6_PREFIX)) { m_net = NET_INTERNAL; memmove(m_addr.data(), m_addr.data() + INTERNAL_IN_IPV6_PREFIX.size(), ADDR_INTERNAL_SIZE); m_addr.resize(ADDR_INTERNAL_SIZE); return; } if (!HasPrefix(m_addr, IPV4_IN_IPV6_PREFIX) && !HasPrefix(m_addr, TORV2_IN_IPV6_PREFIX)) { return; } // IPv4 and TORv2 are not supposed to be embedded in IPv6 (like in // V1 encoding). Unserialize as !IsValid(), thus ignoring them. } else { // If we receive an unknown BIP155 network id (from the future?) // then ignore the address - unserialize as !IsValid(). s.ignore(address_size); } // Mimic a default-constructed CNetAddr object which is !IsValid() and // thus will not be gossiped, but continue reading next addresses from // the stream. m_net = NET_IPV6; m_addr.assign(ADDR_IPV6_SIZE, 0x0); } }; class CSubNet { protected: /// Network (base) address CNetAddr network; /// Netmask, in network byte order uint8_t netmask[16]; /// Is this value valid? (only used to signal parse errors) bool valid; bool SanityCheck() const; public: CSubNet(); CSubNet(const CNetAddr &addr, uint8_t mask); CSubNet(const CNetAddr &addr, const CNetAddr &mask); // constructor for single ip subnet (/32 or /128) explicit CSubNet(const CNetAddr &addr); bool Match(const CNetAddr &addr) const; std::string ToString() const; bool IsValid() const; friend bool operator==(const CSubNet &a, const CSubNet &b); friend bool operator!=(const CSubNet &a, const CSubNet &b) { return !(a == b); } friend bool operator<(const CSubNet &a, const CSubNet &b); SERIALIZE_METHODS(CSubNet, obj) { READWRITE(obj.network); if (obj.network.IsIPv4()) { // Before D9176, CSubNet used the last 4 bytes of netmask to store // the relevant bytes for an IPv4 mask. For compatiblity reasons, // keep doing so in serialized form. uint8_t dummy[12] = {0}; READWRITE(dummy); READWRITE(MakeSpan(obj.netmask).first(4)); } else { READWRITE(obj.netmask); } READWRITE(obj.valid); // Mark invalid if the result doesn't pass sanity checking. 
SER_READ(obj, if (obj.valid) obj.valid = obj.SanityCheck()); } }; /** A combination of a network address (CNetAddr) and a (TCP) port */ class CService : public CNetAddr { protected: // host order uint16_t port; public: CService(); CService(const CNetAddr &ip, unsigned short port); CService(const struct in_addr &ipv4Addr, unsigned short port); explicit CService(const struct sockaddr_in &addr); unsigned short GetPort() const; bool GetSockAddr(struct sockaddr *paddr, socklen_t *addrlen) const; bool SetSockAddr(const struct sockaddr *paddr); friend bool operator==(const CService &a, const CService &b); friend bool operator!=(const CService &a, const CService &b) { return !(a == b); } friend bool operator<(const CService &a, const CService &b); std::vector GetKey() const; std::string ToString() const; std::string ToStringPort() const; std::string ToStringIPPort() const; CService(const struct in6_addr &ipv6Addr, unsigned short port); explicit CService(const struct sockaddr_in6 &addr); SERIALIZE_METHODS(CService, obj) { READWRITEAS(CNetAddr, obj); READWRITE(Using>(obj.port)); } }; bool SanityCheckASMap(const std::vector &asmap); #endif // BITCOIN_NETADDRESS_H diff --git a/src/protocol.cpp b/src/protocol.cpp index 34ed371b3..118a34bcc 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -1,288 +1,290 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #ifndef WIN32 #include #endif #include static std::atomic g_initial_block_download_completed(false); namespace NetMsgType { const char *VERSION = "version"; const char *VERACK = "verack"; const char *ADDR = "addr"; +const char *ADDRV2 = "addrv2"; +const char *SENDADDRV2 = "sendaddrv2"; const char *INV = "inv"; const char *GETDATA = "getdata"; const char *MERKLEBLOCK = "merkleblock"; const char *GETBLOCKS = "getblocks"; const char *GETHEADERS = "getheaders"; const char *TX = "tx"; const char *HEADERS = "headers"; const char *BLOCK = "block"; const char *GETADDR = "getaddr"; const char *MEMPOOL = "mempool"; const char *PING = "ping"; const char *PONG = "pong"; const char *NOTFOUND = "notfound"; const char *FILTERLOAD = "filterload"; const char *FILTERADD = "filteradd"; const char *FILTERCLEAR = "filterclear"; const char *SENDHEADERS = "sendheaders"; const char *FEEFILTER = "feefilter"; const char *SENDCMPCT = "sendcmpct"; const char *CMPCTBLOCK = "cmpctblock"; const char *GETBLOCKTXN = "getblocktxn"; const char *BLOCKTXN = "blocktxn"; const char *GETCFILTERS = "getcfilters"; const char *CFILTER = "cfilter"; const char *GETCFHEADERS = "getcfheaders"; const char *CFHEADERS = "cfheaders"; const char *GETCFCHECKPT = "getcfcheckpt"; const char *CFCHECKPT = "cfcheckpt"; const char *AVAHELLO = "avahello"; const char *AVAPOLL = "avapoll"; const char *AVARESPONSE = "avaresponse"; bool IsBlockLike(const std::string &strCommand) { return strCommand == NetMsgType::BLOCK || strCommand == NetMsgType::CMPCTBLOCK || strCommand == NetMsgType::BLOCKTXN; } }; // namespace NetMsgType /** * All known message types. Keep this in the same order as the list of messages * above and in protocol.h. 
*/ static const std::string allNetMessageTypes[] = { - NetMsgType::VERSION, NetMsgType::VERACK, NetMsgType::ADDR, - NetMsgType::INV, NetMsgType::GETDATA, NetMsgType::MERKLEBLOCK, - NetMsgType::GETBLOCKS, NetMsgType::GETHEADERS, NetMsgType::TX, - NetMsgType::HEADERS, NetMsgType::BLOCK, NetMsgType::GETADDR, - NetMsgType::MEMPOOL, NetMsgType::PING, NetMsgType::PONG, - NetMsgType::NOTFOUND, NetMsgType::FILTERLOAD, NetMsgType::FILTERADD, - NetMsgType::FILTERCLEAR, NetMsgType::SENDHEADERS, NetMsgType::FEEFILTER, - NetMsgType::SENDCMPCT, NetMsgType::CMPCTBLOCK, NetMsgType::GETBLOCKTXN, - NetMsgType::BLOCKTXN, NetMsgType::GETCFILTERS, NetMsgType::CFILTER, - NetMsgType::GETCFHEADERS, NetMsgType::CFHEADERS, NetMsgType::GETCFCHECKPT, - NetMsgType::CFCHECKPT, + NetMsgType::VERSION, NetMsgType::VERACK, NetMsgType::ADDR, + NetMsgType::ADDRV2, NetMsgType::SENDADDRV2, NetMsgType::INV, + NetMsgType::GETDATA, NetMsgType::MERKLEBLOCK, NetMsgType::GETBLOCKS, + NetMsgType::GETHEADERS, NetMsgType::TX, NetMsgType::HEADERS, + NetMsgType::BLOCK, NetMsgType::GETADDR, NetMsgType::MEMPOOL, + NetMsgType::PING, NetMsgType::PONG, NetMsgType::NOTFOUND, + NetMsgType::FILTERLOAD, NetMsgType::FILTERADD, NetMsgType::FILTERCLEAR, + NetMsgType::SENDHEADERS, NetMsgType::FEEFILTER, NetMsgType::SENDCMPCT, + NetMsgType::CMPCTBLOCK, NetMsgType::GETBLOCKTXN, NetMsgType::BLOCKTXN, + NetMsgType::GETCFILTERS, NetMsgType::CFILTER, NetMsgType::GETCFHEADERS, + NetMsgType::CFHEADERS, NetMsgType::GETCFCHECKPT, NetMsgType::CFCHECKPT, }; static const std::vector allNetMessageTypesVec(allNetMessageTypes, allNetMessageTypes + ARRAYLEN(allNetMessageTypes)); CMessageHeader::CMessageHeader(const MessageMagic &pchMessageStartIn) { memcpy(std::begin(pchMessageStart), std::begin(pchMessageStartIn), MESSAGE_START_SIZE); memset(pchCommand.data(), 0, sizeof(pchCommand)); nMessageSize = -1; memset(pchChecksum, 0, CHECKSUM_SIZE); } CMessageHeader::CMessageHeader(const MessageMagic &pchMessageStartIn, const char *pszCommand, unsigned int nMessageSizeIn) { memcpy(std::begin(pchMessageStart), std::begin(pchMessageStartIn), MESSAGE_START_SIZE); // Copy the command name, zero-padding to COMMAND_SIZE bytes size_t i = 0; for (; i < pchCommand.size() && pszCommand[i] != 0; ++i) { pchCommand[i] = pszCommand[i]; } // Assert that the command name passed in is not longer than COMMAND_SIZE assert(pszCommand[i] == 0); for (; i < pchCommand.size(); ++i) { pchCommand[i] = 0; } nMessageSize = nMessageSizeIn; memset(pchChecksum, 0, CHECKSUM_SIZE); } std::string CMessageHeader::GetCommand() const { // return std::string(pchCommand.begin(), pchCommand.end()); return std::string(pchCommand.data(), pchCommand.data() + strnlen(pchCommand.data(), COMMAND_SIZE)); } static bool CheckHeaderMagicAndCommand(const CMessageHeader &header, const CMessageHeader::MessageMagic &magic) { // Check start string if (memcmp(std::begin(header.pchMessageStart), std::begin(magic), CMessageHeader::MESSAGE_START_SIZE) != 0) { return false; } // Check the command string for errors for (const char *p1 = header.pchCommand.data(); p1 < header.pchCommand.data() + CMessageHeader::COMMAND_SIZE; p1++) { if (*p1 == 0) { // Must be all zeros after the first zero for (; p1 < header.pchCommand.data() + CMessageHeader::COMMAND_SIZE; p1++) { if (*p1 != 0) { return false; } } } else if (*p1 < ' ' || *p1 > 0x7E) { return false; } } return true; } bool CMessageHeader::IsValid(const Config &config) const { // Check start string if (!CheckHeaderMagicAndCommand(*this, config.GetChainParams().NetMagic())) { return 
false; } // Message size if (IsOversized(config)) { LogPrintf("CMessageHeader::IsValid(): (%s, %u bytes) is oversized\n", GetCommand(), nMessageSize); return false; } return true; } /** * This is a transition method in order to stay compatible with older code that * does not use the config. It assumes messages will not get too large. This cannot * be used for any piece of code that will download blocks as blocks may be * bigger than the permitted size. Ideally, code that uses this function should * be migrated toward using the config. */ bool CMessageHeader::IsValidWithoutConfig(const MessageMagic &magic) const { // Check start string if (!CheckHeaderMagicAndCommand(*this, magic)) { return false; } // Message size if (nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) { LogPrintf( "CMessageHeader::IsValidForSeeder(): (%s, %u bytes) is oversized\n", GetCommand(), nMessageSize); return false; } return true; } bool CMessageHeader::IsOversized(const Config &config) const { // If the message doesn't contain block content, check against // MAX_PROTOCOL_MESSAGE_LENGTH. if (nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH && !NetMsgType::IsBlockLike(GetCommand())) { return true; } // Scale the maximum accepted size with the block size. if (nMessageSize > 2 * config.GetMaxBlockSize()) { return true; } return false; } ServiceFlags GetDesirableServiceFlags(ServiceFlags services) { if ((services & NODE_NETWORK_LIMITED) && g_initial_block_download_completed) { return ServiceFlags(NODE_NETWORK_LIMITED); } return ServiceFlags(NODE_NETWORK); } void SetServiceFlagsIBDCache(bool state) { g_initial_block_download_completed = state; } std::string CInv::GetCommand() const { std::string cmd; switch (GetKind()) { case MSG_TX: return cmd.append(NetMsgType::TX); case MSG_BLOCK: return cmd.append(NetMsgType::BLOCK); case MSG_FILTERED_BLOCK: return cmd.append(NetMsgType::MERKLEBLOCK); case MSG_CMPCT_BLOCK: return cmd.append(NetMsgType::CMPCTBLOCK); default: throw std::out_of_range( strprintf("CInv::GetCommand(): type=%d unknown type", type)); } } std::string CInv::ToString() const { try { return strprintf("%s %s", GetCommand(), hash.ToString()); } catch (const std::out_of_range &) { return strprintf("0x%08x %s", type, hash.ToString()); } } const std::vector &getAllNetMessageTypes() { return allNetMessageTypesVec; } /** * Convert a service flag (NODE_*) to a human readable string. * It supports unknown service flags which will be returned as "UNKNOWN[...]".
* @param[in] bit the service flag is calculated as (1 << bit) */ static std::string serviceFlagToStr(const size_t bit) { const uint64_t service_flag = 1ULL << bit; switch (ServiceFlags(service_flag)) { case NODE_NONE: // impossible abort(); case NODE_NETWORK: return "NETWORK"; case NODE_GETUTXO: return "GETUTXO"; case NODE_BLOOM: return "BLOOM"; case NODE_NETWORK_LIMITED: return "NETWORK_LIMITED"; case NODE_COMPACT_FILTERS: return "COMPACT_FILTERS"; case NODE_AVALANCHE: return "AVALANCHE"; default: std::ostringstream stream; stream.imbue(std::locale::classic()); stream << "UNKNOWN["; stream << "2^" << bit; stream << "]"; return stream.str(); } } std::vector serviceFlagsToStr(const uint64_t flags) { std::vector str_flags; for (size_t i = 0; i < sizeof(flags) * 8; ++i) { if (flags & (1ULL << i)) { str_flags.emplace_back(serviceFlagToStr(i)); } } return str_flags; } diff --git a/src/protocol.h b/src/protocol.h index 8132f4e87..a332de7e0 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -1,523 +1,536 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #ifndef __cplusplus #error This header can only be compiled as C++. #endif #ifndef BITCOIN_PROTOCOL_H #define BITCOIN_PROTOCOL_H #include #include #include #include #include #include #include class Config; /** * Maximum length of incoming protocol messages (Currently 2MB). * NB: Messages propagating block content are not subject to this limit. */ static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 2 * 1024 * 1024; /** * Message header. * (4) message start. * (12) command. * (4) size. * (4) checksum. */ class CMessageHeader { public: static constexpr size_t MESSAGE_START_SIZE = 4; static constexpr size_t COMMAND_SIZE = 12; static constexpr size_t MESSAGE_SIZE_SIZE = 4; static constexpr size_t CHECKSUM_SIZE = 4; static constexpr size_t MESSAGE_SIZE_OFFSET = MESSAGE_START_SIZE + COMMAND_SIZE; static constexpr size_t CHECKSUM_OFFSET = MESSAGE_SIZE_OFFSET + MESSAGE_SIZE_SIZE; static constexpr size_t HEADER_SIZE = MESSAGE_START_SIZE + COMMAND_SIZE + MESSAGE_SIZE_SIZE + CHECKSUM_SIZE; typedef std::array MessageMagic; explicit CMessageHeader(const MessageMagic &pchMessageStartIn); /** * Construct a P2P message header from message-start characters, a command * and the size of the message. * @note Passing in a `pszCommand` longer than COMMAND_SIZE will result in a * run-time assertion error. */ CMessageHeader(const MessageMagic &pchMessageStartIn, const char *pszCommand, unsigned int nMessageSizeIn); std::string GetCommand() const; bool IsValid(const Config &config) const; bool IsValidWithoutConfig(const MessageMagic &magic) const; bool IsOversized(const Config &config) const; SERIALIZE_METHODS(CMessageHeader, obj) { READWRITE(obj.pchMessageStart, obj.pchCommand, obj.nMessageSize, obj.pchChecksum); } MessageMagic pchMessageStart; std::array pchCommand; uint32_t nMessageSize; uint8_t pchChecksum[CHECKSUM_SIZE]; }; /** * Bitcoin protocol message types. When adding new message types, don't forget * to update allNetMessageTypes in protocol.cpp. */ namespace NetMsgType { /** * The version message provides information about the transmitting node to the * receiving node at the beginning of a connection. 
* @see https://bitcoin.org/en/developer-reference#version */ extern const char *VERSION; /** * The verack message acknowledges a previously-received version message, * informing the connecting node that it can begin to send other messages. * @see https://bitcoin.org/en/developer-reference#verack */ extern const char *VERACK; /** * The addr (IP address) message relays connection information for peers on the * network. * @see https://bitcoin.org/en/developer-reference#addr */ extern const char *ADDR; +/** + * The addrv2 message relays connection information for peers on the network + * just like the addr message, but is extended to allow gossiping of longer node + * addresses (see BIP155). + */ +extern const char *ADDRV2; +/** + * The sendaddrv2 message signals support for receiving ADDRV2 messages + * (BIP155). It also implies that its sender can encode as ADDRV2 and would send + * ADDRV2 instead of ADDR to a peer that has signaled ADDRV2 support by sending + * SENDADDRV2. + */ +extern const char *SENDADDRV2; /** * The inv message (inventory message) transmits one or more inventories of * objects known to the transmitting peer. * @see https://bitcoin.org/en/developer-reference#inv */ extern const char *INV; /** * The getdata message requests one or more data objects from another node. * @see https://bitcoin.org/en/developer-reference#getdata */ extern const char *GETDATA; /** * The merkleblock message is a reply to a getdata message which requested a * block using the inventory type MSG_MERKLEBLOCK. * @since protocol version 70001 as described by BIP37. * @see https://bitcoin.org/en/developer-reference#merkleblock */ extern const char *MERKLEBLOCK; /** * The getblocks message requests an inv message that provides block header * hashes starting from a particular point in the block chain. * @see https://bitcoin.org/en/developer-reference#getblocks */ extern const char *GETBLOCKS; /** * The getheaders message requests a headers message that provides block * headers starting from a particular point in the block chain. * @since protocol version 31800. * @see https://bitcoin.org/en/developer-reference#getheaders */ extern const char *GETHEADERS; /** * The tx message transmits a single transaction. * @see https://bitcoin.org/en/developer-reference#tx */ extern const char *TX; /** * The headers message sends one or more block headers to a node which * previously requested certain headers with a getheaders message. * @since protocol version 31800. * @see https://bitcoin.org/en/developer-reference#headers */ extern const char *HEADERS; /** * The block message transmits a single serialized block. * @see https://bitcoin.org/en/developer-reference#block */ extern const char *BLOCK; /** * The getaddr message requests an addr message from the receiving node, * preferably one with lots of IP addresses of other receiving nodes. * @see https://bitcoin.org/en/developer-reference#getaddr */ extern const char *GETADDR; /** * The mempool message requests the TXIDs of transactions that the receiving * node has verified as valid but which have not yet appeared in a block. * @since protocol version 60002. * @see https://bitcoin.org/en/developer-reference#mempool */ extern const char *MEMPOOL; /** * The ping message is sent periodically to help confirm that the receiving * peer is still connected. * @see https://bitcoin.org/en/developer-reference#ping */ extern const char *PING; /** * The pong message replies to a ping message, proving to the pinging node that * the ponging node is still alive. 
* @since protocol version 60001 as described by BIP31. * @see https://bitcoin.org/en/developer-reference#pong */ extern const char *PONG; /** * The notfound message is a reply to a getdata message which requested an * object the receiving node does not have available for relay. * @since protocol version 70001. * @see https://bitcoin.org/en/developer-reference#notfound */ extern const char *NOTFOUND; /** * The filterload message tells the receiving peer to filter all relayed * transactions and requested merkle blocks through the provided filter. * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. * @see https://bitcoin.org/en/developer-reference#filterload */ extern const char *FILTERLOAD; /** * The filteradd message tells the receiving peer to add a single element to a * previously-set bloom filter, such as a new public key. * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. * @see https://bitcoin.org/en/developer-reference#filteradd */ extern const char *FILTERADD; /** * The filterclear message tells the receiving peer to remove a previously-set * bloom filter. * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. * @see https://bitcoin.org/en/developer-reference#filterclear */ extern const char *FILTERCLEAR; /** * Indicates that a node prefers to receive new block announcements via a * "headers" message rather than an "inv". * @since protocol version 70012 as described by BIP130. * @see https://bitcoin.org/en/developer-reference#sendheaders */ extern const char *SENDHEADERS; /** * The feefilter message tells the receiving peer not to inv us any txs * which do not meet the specified min fee rate. * @since protocol version 70013 as described by BIP133 */ extern const char *FEEFILTER; /** * Contains a 1-byte bool and 8-byte LE version number. * Indicates that a node is willing to provide blocks via "cmpctblock" messages. * May indicate that a node prefers to receive new block announcements via a * "cmpctblock" message rather than an "inv", depending on message contents. * @since protocol version 70014 as described by BIP 152 */ extern const char *SENDCMPCT; /** * Contains a CBlockHeaderAndShortTxIDs object - providing a header and * list of "short txids". * @since protocol version 70014 as described by BIP 152 */ extern const char *CMPCTBLOCK; /** * Contains a BlockTransactionsRequest * Peer should respond with "blocktxn" message. * @since protocol version 70014 as described by BIP 152 */ extern const char *GETBLOCKTXN; /** * Contains a BlockTransactions. * Sent in response to a "getblocktxn" message. * @since protocol version 70014 as described by BIP 152 */ extern const char *BLOCKTXN; /** * getcfilters requests compact filters for a range of blocks. * Only available with service bit NODE_COMPACT_FILTERS as described by * BIP 157 & 158. */ extern const char *GETCFILTERS; /** * cfilter is a response to a getcfilters request containing a single compact * filter. */ extern const char *CFILTER; /** * getcfheaders requests a compact filter header and the filter hashes for a * range of blocks, which can then be used to reconstruct the filter headers * for those blocks. * Only available with service bit NODE_COMPACT_FILTERS as described by * BIP 157 & 158. 
*/ extern const char *GETCFHEADERS; /** * cfheaders is a response to a getcfheaders request containing a filter header * and a vector of filter hashes for each subsequent block in the requested * range. */ extern const char *CFHEADERS; /** * getcfcheckpt requests evenly spaced compact filter headers, enabling * parallelized download and validation of the headers between them. * Only available with service bit NODE_COMPACT_FILTERS as described by * BIP 157 & 158. */ extern const char *GETCFCHECKPT; /** * cfcheckpt is a response to a getcfcheckpt request containing a vector of * evenly spaced filter headers for blocks on the requested chain. */ extern const char *CFCHECKPT; /** * Contains a delegation and a signature. */ extern const char *AVAHELLO; /** * Contains an avalanche::Poll. * Peer should respond with "avaresponse" message. */ extern const char *AVAPOLL; /** * Contains an avalanche::Response. * Sent in response to a "avapoll" message. */ extern const char *AVARESPONSE; /** * Indicate if the message is used to transmit the content of a block. * These messages can be significantly larger than usual messages and therefore * may need to be processed differently. */ bool IsBlockLike(const std::string &strCommand); }; // namespace NetMsgType /** Get a vector of all valid message types (see above) */ const std::vector &getAllNetMessageTypes(); /** * nServices flags. */ enum ServiceFlags : uint64_t { // NOTE: When adding here, be sure to update serviceFlagToStr too // Nothing NODE_NONE = 0, // NODE_NETWORK means that the node is capable of serving the complete block // chain. It is currently set by all Bitcoin ABC non pruned nodes, and is // unset by SPV clients or other light clients. NODE_NETWORK = (1 << 0), // NODE_GETUTXO means the node is capable of responding to the getutxo // protocol request. Bitcoin ABC does not support this but a patch set // called Bitcoin XT does. See BIP 64 for details on how this is // implemented. NODE_GETUTXO = (1 << 1), // NODE_BLOOM means the node is capable and willing to handle bloom-filtered // connections. Bitcoin ABC nodes used to support this by default, without // advertising this bit, but no longer do as of protocol version 70011 (= // NO_BLOOM_VERSION) NODE_BLOOM = (1 << 2), // Bit 4 was NODE_XTHIN, removed in v0.22.12 // Bit 5 was NODE_BITCOIN_CASH, removed in v0.22.8 // NODE_COMPACT_FILTERS means the node will service basic block filter // requests. // See BIP157 and BIP158 for details on how this is implemented. NODE_COMPACT_FILTERS = (1 << 6), // NODE_NETWORK_LIMITED means the same as NODE_NETWORK with the limitation // of only serving the last 288 (2 day) blocks // See BIP159 for details on how this is implemented. NODE_NETWORK_LIMITED = (1 << 10), // The last non experimental service bit, helper for looping over the flags NODE_LAST_NON_EXPERIMENTAL_SERVICE_BIT = (1 << 23), // Bits 24-31 are reserved for temporary experiments. Just pick a bit that // isn't getting used, or one not being used much, and notify the // bitcoin-development mailing list. Remember that service bits are just // unauthenticated advertisements, so your code must be robust against // collisions and other cases where nodes may be advertising a service they // do not actually support. Other service bits should be allocated via the // BIP process. // NODE_AVALANCHE means the node supports Bitcoin Cash's avalanche // preconsensus mechanism. NODE_AVALANCHE = (1 << 24), }; /** * Convert service flags (a bitmask of NODE_*) to human readable strings. 
* It supports unknown service flags which will be returned as "UNKNOWN[...]". * @param[in] flags multiple NODE_* bitwise-OR-ed together */ std::vector serviceFlagsToStr(const uint64_t flags); /** * Gets the set of service flags which are "desirable" for a given peer. * * These are the flags which are required for a peer to support for them * to be "interesting" to us, ie for us to wish to use one of our few * outbound connection slots for or for us to wish to prioritize keeping * their connection around. * * Relevant service flags may be peer- and state-specific in that the * version of the peer may determine which flags are required (eg in the * case of NODE_NETWORK_LIMITED where we seek out NODE_NETWORK peers * unless they set NODE_NETWORK_LIMITED and we are out of IBD, in which * case NODE_NETWORK_LIMITED suffices). * * Thus, generally, avoid calling with peerServices == NODE_NONE, unless * state-specific flags must absolutely be avoided. When called with * peerServices == NODE_NONE, the returned desirable service flags are * guaranteed to not change dependent on state - ie they are suitable for * use when describing peers which we know to be desirable, but for which * we do not have a confirmed set of service flags. * * If the NODE_NONE return value is changed, contrib/seeds/makeseeds.py * should be updated appropriately to filter for the same nodes. */ ServiceFlags GetDesirableServiceFlags(ServiceFlags services); /** * Set the current IBD status in order to figure out the desirable service * flags */ void SetServiceFlagsIBDCache(bool status); /** * A shortcut for (services & GetDesirableServiceFlags(services)) * == GetDesirableServiceFlags(services), ie determines whether the given * set of service flags are sufficient for a peer to be "relevant". */ static inline bool HasAllDesirableServiceFlags(ServiceFlags services) { return !(GetDesirableServiceFlags(services) & (~services)); } /** * Checks if a peer with the given service flags may be capable of having a * robust address-storage DB. */ static inline bool MayHaveUsefulAddressDB(ServiceFlags services) { return (services & NODE_NETWORK) || (services & NODE_NETWORK_LIMITED); } /** * A CService with information about it as peer. */ class CAddress : public CService { static constexpr uint32_t TIME_INIT{100000000}; public: CAddress() : CService{} {}; CAddress(CService ipIn, ServiceFlags nServicesIn) : CService{ipIn}, nServices{nServicesIn} {}; CAddress(CService ipIn, ServiceFlags nServicesIn, uint32_t nTimeIn) : CService{ipIn}, nTime{nTimeIn}, nServices{nServicesIn} {}; void Init(); SERIALIZE_METHODS(CAddress, obj) { SER_READ(obj, obj.nTime = TIME_INIT); int nVersion = s.GetVersion(); if (s.GetType() & SER_DISK) { READWRITE(nVersion); } if ((s.GetType() & SER_DISK) || (nVersion != INIT_PROTO_VERSION && !(s.GetType() & SER_GETHASH))) { // The only time we serialize a CAddress object without nTime is in // the initial VERSION messages which contain two CAddress records. // At that point, the serialization version is INIT_PROTO_VERSION. // After the version handshake, serialization version is >= // MIN_PEER_PROTO_VERSION and all ADDR messages are serialized with // nTime. 
READWRITE(obj.nTime); } if (nVersion & ADDRV2_FORMAT) { uint64_t services_tmp; SER_WRITE(obj, services_tmp = obj.nServices); READWRITE(Using>(services_tmp)); SER_READ(obj, obj.nServices = static_cast(services_tmp)); } else { READWRITE(Using>(obj.nServices)); } READWRITEAS(CService, obj); } // disk and network only uint32_t nTime{TIME_INIT}; ServiceFlags nServices{NODE_NONE}; }; /** getdata message type flags */ const uint32_t MSG_TYPE_MASK = 0xffffffff >> 3; /** * getdata / inv message types. * These numbers are defined by the protocol. When adding a new value, be sure * to mention it in the respective BIP. */ enum GetDataMsg { UNDEFINED = 0, MSG_TX = 1, MSG_BLOCK = 2, // The following can only occur in getdata. Invs always use TX or BLOCK. //! Defined in BIP37 MSG_FILTERED_BLOCK = 3, //! Defined in BIP152 MSG_CMPCT_BLOCK = 4, }; /** * Inv(ventory) message data. * Intended as non-ambiguous identifier of objects (eg. transactions, blocks) * held by peers. */ class CInv { public: uint32_t type; uint256 hash; CInv() : type(0), hash() {} CInv(uint32_t typeIn, const uint256 &hashIn) : type(typeIn), hash(hashIn) {} SERIALIZE_METHODS(CInv, obj) { READWRITE(obj.type, obj.hash); } friend bool operator<(const CInv &a, const CInv &b) { return a.type < b.type || (a.type == b.type && a.hash < b.hash); } std::string GetCommand() const; std::string ToString() const; uint32_t GetKind() const { return type & MSG_TYPE_MASK; } bool IsTx() const { auto k = GetKind(); return k == MSG_TX; } bool IsSomeBlock() const { auto k = GetKind(); return k == MSG_BLOCK || k == MSG_FILTERED_BLOCK || k == MSG_CMPCT_BLOCK; } }; #endif // BITCOIN_PROTOCOL_H diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 02ea839be..13f9269af 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -1,743 +1,760 @@ // Copyright (c) 2012-2019 The Bitcoin Core developers // Copyright (c) 2017-2019 The Bitcoin developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include class CAddrManSerializationMock : public CAddrMan { public: virtual void Serialize(CDataStream &s) const = 0; //! Ensure that bucket placement is always the same for testing purposes. void MakeDeterministic() { nKey.SetNull(); insecure_rand = FastRandomContext(true); } }; class CAddrManUncorrupted : public CAddrManSerializationMock { public: void Serialize(CDataStream &s) const override { CAddrMan::Serialize(s); } }; class CAddrManCorrupted : public CAddrManSerializationMock { public: void Serialize(CDataStream &s) const override { // Produces corrupt output that claims addrman has 20 addrs when it only // has one addr. 
uint8_t nVersion = 1; s << nVersion; s << uint8_t(32); s << nKey; s << 10; // nNew s << 10; // nTried int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30); s << nUBuckets; CService serv; BOOST_CHECK(Lookup("252.1.1.1", serv, 7777, false)); CAddress addr = CAddress(serv, NODE_NONE); CNetAddr resolved; BOOST_CHECK(LookupHost("252.2.2.2", resolved, false)); CAddrInfo info = CAddrInfo(addr, resolved); s << info; } }; class NetTestConfig : public DummyConfig { public: bool SetMaxBlockSize(uint64_t maxBlockSize) override { nMaxBlockSize = maxBlockSize; return true; } uint64_t GetMaxBlockSize() const override { return nMaxBlockSize; } private: uint64_t nMaxBlockSize; }; static CDataStream AddrmanToStream(CAddrManSerializationMock &_addrman) { CDataStream ssPeersIn(SER_DISK, CLIENT_VERSION); ssPeersIn << Params().DiskMagic(); ssPeersIn << _addrman; std::string str = ssPeersIn.str(); std::vector vchData(str.begin(), str.end()); return CDataStream(vchData, SER_DISK, CLIENT_VERSION); } BOOST_FIXTURE_TEST_SUITE(net_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(cnode_listen_port) { // test default unsigned short port = GetListenPort(); BOOST_CHECK(port == Params().GetDefaultPort()); // test set port unsigned short altPort = 12345; BOOST_CHECK(gArgs.SoftSetArg("-port", ToString(altPort))); port = GetListenPort(); BOOST_CHECK(port == altPort); } BOOST_AUTO_TEST_CASE(caddrdb_read) { CAddrManUncorrupted addrmanUncorrupted; addrmanUncorrupted.MakeDeterministic(); CService addr1, addr2, addr3; BOOST_CHECK(Lookup("250.7.1.1", addr1, 8333, false)); BOOST_CHECK(Lookup("250.7.2.2", addr2, 9999, false)); BOOST_CHECK(Lookup("250.7.3.3", addr3, 9999, false)); BOOST_CHECK(Lookup(std::string("250.7.3.3", 9), addr3, 9999, false)); BOOST_CHECK( !Lookup(std::string("250.7.3.3\0example.com", 21), addr3, 9999, false)); // Add three addresses to new table. CService source; BOOST_CHECK(Lookup("252.5.1.1", source, 8333, false)); BOOST_CHECK(addrmanUncorrupted.Add(CAddress(addr1, NODE_NONE), source)); BOOST_CHECK(addrmanUncorrupted.Add(CAddress(addr2, NODE_NONE), source)); BOOST_CHECK(addrmanUncorrupted.Add(CAddress(addr3, NODE_NONE), source)); // Test that the de-serialization does not throw an exception. CDataStream ssPeers1 = AddrmanToStream(addrmanUncorrupted); bool exceptionThrown = false; CAddrMan addrman1; BOOST_CHECK(addrman1.size() == 0); try { uint8_t pchMsgTmp[4]; ssPeers1 >> pchMsgTmp; ssPeers1 >> addrman1; } catch (const std::exception &) { exceptionThrown = true; } BOOST_CHECK(addrman1.size() == 3); BOOST_CHECK(exceptionThrown == false); // Test that CAddrDB::Read creates an addrman with the correct number of // addrs. CDataStream ssPeers2 = AddrmanToStream(addrmanUncorrupted); CAddrMan addrman2; CAddrDB adb(Params()); BOOST_CHECK(addrman2.size() == 0); BOOST_CHECK(adb.Read(addrman2, ssPeers2)); BOOST_CHECK(addrman2.size() == 3); } BOOST_AUTO_TEST_CASE(caddrdb_read_corrupted) { CAddrManCorrupted addrmanCorrupted; addrmanCorrupted.MakeDeterministic(); // Test that the de-serialization of corrupted addrman throws an exception. CDataStream ssPeers1 = AddrmanToStream(addrmanCorrupted); bool exceptionThrown = false; CAddrMan addrman1; BOOST_CHECK(addrman1.size() == 0); try { uint8_t pchMsgTmp[4]; ssPeers1 >> pchMsgTmp; ssPeers1 >> addrman1; } catch (const std::exception &) { exceptionThrown = true; } // Even though de-serialization failed, addrman is not left in a clean // state.
BOOST_CHECK(addrman1.size() == 1); BOOST_CHECK(exceptionThrown); // Test that CAddrDB::Read leaves addrman in a clean state if // de-serialization fails. CDataStream ssPeers2 = AddrmanToStream(addrmanCorrupted); CAddrMan addrman2; CAddrDB adb(Params()); BOOST_CHECK(addrman2.size() == 0); BOOST_CHECK(!adb.Read(addrman2, ssPeers2)); BOOST_CHECK(addrman2.size() == 0); } BOOST_AUTO_TEST_CASE(cnode_simple_test) { SOCKET hSocket = INVALID_SOCKET; NodeId id = 0; int height = 0; in_addr ipv4Addr; ipv4Addr.s_addr = 0xa0b0c001; CAddress addr = CAddress(CService(ipv4Addr, 7777), NODE_NETWORK); std::string pszDest; auto pnode1 = std::make_unique(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, 0, CAddress(), pszDest, ConnectionType::OUTBOUND); BOOST_CHECK(pnode1->IsInboundConn() == false); auto pnode2 = std::make_unique(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, 1, CAddress(), pszDest, ConnectionType::INBOUND); BOOST_CHECK(pnode2->IsInboundConn() == true); } BOOST_AUTO_TEST_CASE(test_getSubVersionEB) { BOOST_CHECK_EQUAL(getSubVersionEB(13800000000), "13800.0"); BOOST_CHECK_EQUAL(getSubVersionEB(3800000000), "3800.0"); BOOST_CHECK_EQUAL(getSubVersionEB(14000000), "14.0"); BOOST_CHECK_EQUAL(getSubVersionEB(1540000), "1.5"); BOOST_CHECK_EQUAL(getSubVersionEB(1560000), "1.5"); BOOST_CHECK_EQUAL(getSubVersionEB(210000), "0.2"); BOOST_CHECK_EQUAL(getSubVersionEB(10000), "0.0"); BOOST_CHECK_EQUAL(getSubVersionEB(0), "0.0"); } BOOST_AUTO_TEST_CASE(test_userAgent) { NetTestConfig config; config.SetMaxBlockSize(8000000); const std::string uacomment = "A very nice comment"; gArgs.ForceSetMultiArg("-uacomment", {uacomment}); const std::string versionMessage = "/Bitcoin ABC:" + ToString(CLIENT_VERSION_MAJOR) + "." + ToString(CLIENT_VERSION_MINOR) + "." + ToString(CLIENT_VERSION_REVISION) + "(EB8.0; " + uacomment + ")/"; BOOST_CHECK_EQUAL(userAgent(config), versionMessage); } BOOST_AUTO_TEST_CASE(LimitedAndReachable_Network) { BOOST_CHECK_EQUAL(IsReachable(NET_IPV4), true); BOOST_CHECK_EQUAL(IsReachable(NET_IPV6), true); BOOST_CHECK_EQUAL(IsReachable(NET_ONION), true); SetReachable(NET_IPV4, false); SetReachable(NET_IPV6, false); SetReachable(NET_ONION, false); BOOST_CHECK_EQUAL(IsReachable(NET_IPV4), false); BOOST_CHECK_EQUAL(IsReachable(NET_IPV6), false); BOOST_CHECK_EQUAL(IsReachable(NET_ONION), false); SetReachable(NET_IPV4, true); SetReachable(NET_IPV6, true); SetReachable(NET_ONION, true); BOOST_CHECK_EQUAL(IsReachable(NET_IPV4), true); BOOST_CHECK_EQUAL(IsReachable(NET_IPV6), true); BOOST_CHECK_EQUAL(IsReachable(NET_ONION), true); } BOOST_AUTO_TEST_CASE(LimitedAndReachable_NetworkCaseUnroutableAndInternal) { BOOST_CHECK_EQUAL(IsReachable(NET_UNROUTABLE), true); BOOST_CHECK_EQUAL(IsReachable(NET_INTERNAL), true); SetReachable(NET_UNROUTABLE, false); SetReachable(NET_INTERNAL, false); // Ignored for both networks BOOST_CHECK_EQUAL(IsReachable(NET_UNROUTABLE), true); BOOST_CHECK_EQUAL(IsReachable(NET_INTERNAL), true); } CNetAddr UtilBuildAddress(uint8_t p1, uint8_t p2, uint8_t p3, uint8_t p4) { uint8_t ip[] = {p1, p2, p3, p4}; struct sockaddr_in sa; // initialize the memory block memset(&sa, 0, sizeof(sockaddr_in)); memcpy(&(sa.sin_addr), &ip, sizeof(ip)); return CNetAddr(sa.sin_addr); } BOOST_AUTO_TEST_CASE(LimitedAndReachable_CNetAddr) { // 1.1.1.1 CNetAddr addr = UtilBuildAddress(0x001, 0x001, 0x001, 0x001); SetReachable(NET_IPV4, true); BOOST_CHECK_EQUAL(IsReachable(addr), true); SetReachable(NET_IPV4, false); BOOST_CHECK_EQUAL(IsReachable(addr), false); // have to reset this, because this is 
stateful. SetReachable(NET_IPV4, true); } BOOST_AUTO_TEST_CASE(LocalAddress_BasicLifecycle) { // 2.1.1.1:1000 CService addr = CService(UtilBuildAddress(0x002, 0x001, 0x001, 0x001), 1000); SetReachable(NET_IPV4, true); BOOST_CHECK_EQUAL(IsLocal(addr), false); BOOST_CHECK_EQUAL(AddLocal(addr, 1000), true); BOOST_CHECK_EQUAL(IsLocal(addr), true); RemoveLocal(addr); BOOST_CHECK_EQUAL(IsLocal(addr), false); } BOOST_AUTO_TEST_CASE(cnetaddr_basic) { CNetAddr addr; // IPv4, INADDR_ANY BOOST_REQUIRE(LookupHost("0.0.0.0", addr, false)); BOOST_REQUIRE(!addr.IsValid()); BOOST_REQUIRE(addr.IsIPv4()); BOOST_CHECK(addr.IsBindAny()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "0.0.0.0"); // IPv4, INADDR_NONE BOOST_REQUIRE(LookupHost("255.255.255.255", addr, false)); BOOST_REQUIRE(!addr.IsValid()); BOOST_REQUIRE(addr.IsIPv4()); BOOST_CHECK(!addr.IsBindAny()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "255.255.255.255"); // IPv4, casual BOOST_REQUIRE(LookupHost("12.34.56.78", addr, false)); BOOST_REQUIRE(addr.IsValid()); BOOST_REQUIRE(addr.IsIPv4()); BOOST_CHECK(!addr.IsBindAny()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "12.34.56.78"); // IPv6, in6addr_any BOOST_REQUIRE(LookupHost("::", addr, false)); BOOST_REQUIRE(!addr.IsValid()); BOOST_REQUIRE(addr.IsIPv6()); BOOST_CHECK(addr.IsBindAny()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "::"); // IPv6, casual BOOST_REQUIRE( LookupHost("1122:3344:5566:7788:9900:aabb:ccdd:eeff", addr, false)); BOOST_REQUIRE(addr.IsValid()); BOOST_REQUIRE(addr.IsIPv6()); BOOST_CHECK(!addr.IsBindAny()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "1122:3344:5566:7788:9900:aabb:ccdd:eeff"); // TORv2 BOOST_REQUIRE(addr.SetSpecial("6hzph5hv6337r6p2.onion")); BOOST_REQUIRE(addr.IsValid()); BOOST_REQUIRE(addr.IsTor()); BOOST_CHECK(!addr.IsBindAny()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "6hzph5hv6337r6p2.onion"); // TORv3 const char *torv3_addr = "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion"; BOOST_REQUIRE(addr.SetSpecial(torv3_addr)); BOOST_REQUIRE(addr.IsValid()); BOOST_REQUIRE(addr.IsTor()); BOOST_CHECK(!addr.IsBindAny()); + BOOST_CHECK(!addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), torv3_addr); // TORv3, broken, with wrong checksum BOOST_CHECK(!addr.SetSpecial( "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscsad.onion")); // TORv3, broken, with wrong version BOOST_CHECK(!addr.SetSpecial( "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscrye.onion")); // TORv3, malicious BOOST_CHECK(!addr.SetSpecial(std::string{ "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd\0wtf.onion", 66})); // TOR, bogus length BOOST_CHECK(!addr.SetSpecial(std::string{"mfrggzak.onion"})); // TOR, invalid base32 BOOST_CHECK(!addr.SetSpecial(std::string{"mf*g zak.onion"})); // Internal addr.SetInternal("esffpp"); // "internal" is considered invalid BOOST_REQUIRE(!addr.IsValid()); BOOST_REQUIRE(addr.IsInternal()); BOOST_CHECK(!addr.IsBindAny()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "esffpvrt3wpeaygy.internal"); // Totally bogus BOOST_CHECK(!addr.SetSpecial("totally bogus")); } BOOST_AUTO_TEST_CASE(cnetaddr_serialize_v1) { CNetAddr addr; CDataStream s(SER_NETWORK, PROTOCOL_VERSION); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "00000000000000000000000000000000"); s.clear(); BOOST_REQUIRE(LookupHost("1.2.3.4", addr, 
false)); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "00000000000000000000ffff01020304"); s.clear(); BOOST_REQUIRE( LookupHost("1a1b:2a2b:3a3b:4a4b:5a5b:6a6b:7a7b:8a8b", addr, false)); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "1a1b2a2b3a3b4a4b5a5b6a6b7a7b8a8b"); s.clear(); BOOST_REQUIRE(addr.SetSpecial("6hzph5hv6337r6p2.onion")); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "fd87d87eeb43f1f2f3f4f5f6f7f8f9fa"); s.clear(); BOOST_REQUIRE(addr.SetSpecial( "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion")); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "00000000000000000000000000000000"); s.clear(); addr.SetInternal("a"); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "fd6b88c08724ca978112ca1bbdcafac2"); s.clear(); } BOOST_AUTO_TEST_CASE(cnetaddr_serialize_v2) { CNetAddr addr; CDataStream s(SER_NETWORK, PROTOCOL_VERSION); // Add ADDRV2_FORMAT to the version so that the CNetAddr // serialize method produces an address in v2 format. s.SetVersion(s.GetVersion() | ADDRV2_FORMAT); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "021000000000000000000000000000000000"); s.clear(); BOOST_REQUIRE(LookupHost("1.2.3.4", addr, false)); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "010401020304"); s.clear(); BOOST_REQUIRE( LookupHost("1a1b:2a2b:3a3b:4a4b:5a5b:6a6b:7a7b:8a8b", addr, false)); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "02101a1b2a2b3a3b4a4b5a5b6a6b7a7b8a8b"); s.clear(); BOOST_REQUIRE(addr.SetSpecial("6hzph5hv6337r6p2.onion")); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "030af1f2f3f4f5f6f7f8f9fa"); s.clear(); BOOST_REQUIRE(addr.SetSpecial( "kpgvmscirrdqpekbqjsvw5teanhatztpp2gl6eee4zkowvwfxwenqaid.onion")); s << addr; BOOST_CHECK_EQUAL( HexStr(s), "042053cd5648488c4707914182655b7664034e09e66f7e8cbf1084e654eb56c5bd88"); s.clear(); BOOST_REQUIRE(addr.SetInternal("a")); s << addr; BOOST_CHECK_EQUAL(HexStr(s), "0210fd6b88c08724ca978112ca1bbdcafac2"); s.clear(); } BOOST_AUTO_TEST_CASE(cnetaddr_unserialize_v2) { CNetAddr addr; CDataStream s(SER_NETWORK, PROTOCOL_VERSION); // Add ADDRV2_FORMAT to the version so that the CNetAddr // unserialize method expects an address in v2 format. s.SetVersion(s.GetVersion() | ADDRV2_FORMAT); // Valid IPv4. s << MakeSpan(ParseHex("01" // network type (IPv4) "04" // address length "01020304")); // address s >> addr; BOOST_CHECK(addr.IsValid()); BOOST_CHECK(addr.IsIPv4()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "1.2.3.4"); BOOST_REQUIRE(s.empty()); // Invalid IPv4, valid length but address itself is shorter. s << MakeSpan(ParseHex("01" // network type (IPv4) "04" // address length "0102")); // address BOOST_CHECK_EXCEPTION(s >> addr, std::ios_base::failure, HasReason("end of data")); BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input. s.clear(); // Invalid IPv4, with bogus length. s << MakeSpan(ParseHex("01" // network type (IPv4) "05" // address length "01020304")); // address BOOST_CHECK_EXCEPTION( s >> addr, std::ios_base::failure, HasReason("BIP155 IPv4 address with length 5 (should be 4)")); BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input. s.clear(); // Invalid IPv4, with extreme length. s << MakeSpan(ParseHex("01" // network type (IPv4) "fd0102" // address length (513 as CompactSize) "01020304")); // address BOOST_CHECK_EXCEPTION(s >> addr, std::ios_base::failure, HasReason("Address too long: 513 > 512")); BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input. s.clear(); // Valid IPv6. 
s << MakeSpan(ParseHex("02" // network type (IPv6) "10" // address length "0102030405060708090a0b0c0d0e0f10")); // address s >> addr; BOOST_CHECK(addr.IsValid()); BOOST_CHECK(addr.IsIPv6()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "102:304:506:708:90a:b0c:d0e:f10"); BOOST_REQUIRE(s.empty()); // Valid IPv6, contains embedded "internal". s << MakeSpan( ParseHex("02" // network type (IPv6) "10" // address length "fd6b88c08724ca978112ca1bbdcafac2")); // address: 0xfd + // sha256("bitcoin")[0:5] // + sha256(name)[0:10] s >> addr; BOOST_CHECK(addr.IsInternal()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "zklycewkdo64v6wc.internal"); BOOST_REQUIRE(s.empty()); // Invalid IPv6, with bogus length. s << MakeSpan(ParseHex("02" // network type (IPv6) "04" // address length "00")); // address BOOST_CHECK_EXCEPTION( s >> addr, std::ios_base::failure, HasReason("BIP155 IPv6 address with length 4 (should be 16)")); BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input. s.clear(); // Invalid IPv6, contains embedded IPv4. s << MakeSpan(ParseHex("02" // network type (IPv6) "10" // address length "00000000000000000000ffff01020304")); // address s >> addr; BOOST_CHECK(!addr.IsValid()); BOOST_REQUIRE(s.empty()); // Invalid IPv6, contains embedded TORv2. s << MakeSpan(ParseHex("02" // network type (IPv6) "10" // address length "fd87d87eeb430102030405060708090a")); // address s >> addr; BOOST_CHECK(!addr.IsValid()); BOOST_REQUIRE(s.empty()); // Valid TORv2. s << MakeSpan(ParseHex("03" // network type (TORv2) "0a" // address length "f1f2f3f4f5f6f7f8f9fa")); // address s >> addr; BOOST_CHECK(addr.IsValid()); BOOST_CHECK(addr.IsTor()); + BOOST_CHECK(addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "6hzph5hv6337r6p2.onion"); BOOST_REQUIRE(s.empty()); // Invalid TORv2, with bogus length. s << MakeSpan(ParseHex("03" // network type (TORv2) "07" // address length "00")); // address BOOST_CHECK_EXCEPTION( s >> addr, std::ios_base::failure, HasReason("BIP155 TORv2 address with length 7 (should be 10)")); BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input. s.clear(); // Valid TORv3. s << MakeSpan(ParseHex("04" // network type (TORv3) "20" // address length "79bcc625184b05194975c28b66b66b04" // address "69f7f6556fb1ac3189a79b40dda32f1f")); s >> addr; BOOST_CHECK(addr.IsValid()); BOOST_CHECK(addr.IsTor()); + BOOST_CHECK(!addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL( addr.ToString(), "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion"); BOOST_REQUIRE(s.empty()); // Invalid TORv3, with bogus length. s << MakeSpan(ParseHex("04" // network type (TORv3) "00" // address length "00" // address )); BOOST_CHECK_EXCEPTION( s >> addr, std::ios_base::failure, HasReason("BIP155 TORv3 address with length 0 (should be 32)")); BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input. s.clear(); // Valid I2P. s << MakeSpan(ParseHex("05" // network type (I2P) "20" // address length "a2894dabaec08c0051a481a6dac88b64" // address "f98232ae42d4b6fd2fa81952dfe36a87")); s >> addr; BOOST_CHECK(addr.IsValid()); + BOOST_CHECK(addr.IsI2P()); + BOOST_CHECK(!addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL( addr.ToString(), "ukeu3k5oycgaauneqgtnvselmt4yemvoilkln7jpvamvfx7dnkdq.b32.i2p"); BOOST_REQUIRE(s.empty()); // Invalid I2P, with bogus length. 
s << MakeSpan(ParseHex("05" // network type (I2P) "03" // address length "00" // address )); BOOST_CHECK_EXCEPTION( s >> addr, std::ios_base::failure, HasReason("BIP155 I2P address with length 3 (should be 32)")); BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input. s.clear(); // Valid CJDNS. s << MakeSpan(ParseHex("06" // network type (CJDNS) "10" // address length "fc000001000200030004000500060007" // address )); s >> addr; BOOST_CHECK(addr.IsValid()); + BOOST_CHECK(addr.IsCJDNS()); + BOOST_CHECK(!addr.IsAddrV1Compatible()); BOOST_CHECK_EQUAL(addr.ToString(), "fc00:1:2:3:4:5:6:7"); BOOST_REQUIRE(s.empty()); // Invalid CJDNS, with bogus length. s << MakeSpan(ParseHex("06" // network type (CJDNS) "01" // address length "00" // address )); BOOST_CHECK_EXCEPTION( s >> addr, std::ios_base::failure, HasReason("BIP155 CJDNS address with length 1 (should be 16)")); BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input. s.clear(); // Unknown, with extreme length. s << MakeSpan( ParseHex("aa" // network type (unknown) "fe00000002" // address length (CompactSize's MAX_SIZE) "01020304050607" // address )); BOOST_CHECK_EXCEPTION(s >> addr, std::ios_base::failure, HasReason("Address too long: 33554432 > 512")); BOOST_REQUIRE(!s.empty()); // The stream is not consumed on invalid input. s.clear(); // Unknown, with reasonable length. s << MakeSpan(ParseHex("aa" // network type (unknown) "04" // address length "01020304" // address )); s >> addr; BOOST_CHECK(!addr.IsValid()); BOOST_REQUIRE(s.empty()); // Unknown, with zero length. s << MakeSpan(ParseHex("aa" // network type (unknown) "00" // address length "" // address )); s >> addr; BOOST_CHECK(!addr.IsValid()); BOOST_REQUIRE(s.empty()); } // prior to PR #14728, this test triggers an undefined behavior BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) { // set up local addresses; all that's necessary to reproduce the bug is // that a normal IPv4 address is among the entries, but if this address is // !IsRoutable the undefined behavior is easier to trigger deterministically { LOCK(cs_mapLocalHost); in_addr ipv4AddrLocal; ipv4AddrLocal.s_addr = 0x0100007f; CNetAddr addr = CNetAddr(ipv4AddrLocal); LocalServiceInfo lsi; lsi.nScore = 23; lsi.nPort = 42; mapLocalHost[addr] = lsi; } // create a peer with an IPv4 address in_addr ipv4AddrPeer; ipv4AddrPeer.s_addr = 0xa0b0c001; CAddress addr = CAddress(CService(ipv4AddrPeer, 7777), NODE_NETWORK); std::unique_ptr pnode = std::make_unique( 0, NODE_NETWORK, 0, INVALID_SOCKET, addr, 0, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND); pnode->fSuccessfullyConnected.store(true); // the peer claims to be reaching us via IPv6 in6_addr ipv6AddrLocal; memset(ipv6AddrLocal.s6_addr, 0, 16); ipv6AddrLocal.s6_addr[0] = 0xcc; CAddress addrLocal = CAddress(CService(ipv6AddrLocal, 7777), NODE_NETWORK); pnode->SetAddrLocal(addrLocal); // before patch, this causes undefined behavior detectable with clang's // -fsanitize=memory AdvertiseLocal(&*pnode); // suppress no-checks-run warning; if this test fails, it's by triggering a // sanitizer BOOST_CHECK(1); } BOOST_AUTO_TEST_CASE(PoissonNextSend) { g_mock_deterministic_tests = true; int64_t now = 5000; int average_interval_seconds = 600; auto poisson = ::PoissonNextSend(now, average_interval_seconds); std::chrono::microseconds poisson_chrono = ::PoissonNextSend(std::chrono::microseconds{now}, std::chrono::seconds{average_interval_seconds}); BOOST_CHECK_EQUAL(poisson, poisson_chrono.count()); g_mock_deterministic_tests = false; } 
BOOST_AUTO_TEST_SUITE_END() diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index 6da32cc3e..a142829eb 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -1,71 +1,71 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """ Test addr relay """ from test_framework.messages import ( CAddress, NODE_NETWORK, msg_addr, ) from test_framework.mininode import ( P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, ) import time ADDRS = [] for i in range(10): addr = CAddress() addr.time = int(time.time()) + i addr.nServices = NODE_NETWORK addr.ip = "123.123.123.{}".format(i % 256) addr.port = 8333 + i ADDRS.append(addr) class AddrReceiver(P2PInterface): def on_addr(self, message): for addr in message.addrs: assert_equal(addr.nServices, NODE_NETWORK) assert addr.ip.startswith('123.123.123.') assert (8333 <= addr.port < 8343) class AddrTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = False self.num_nodes = 1 def run_test(self): self.log.info('Create connection that sends addr messages') addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) msg = msg_addr() self.log.info('Send too large addr message') msg.addrs = ADDRS * 101 - with self.nodes[0].assert_debug_log(['message addr size() = 1010']): + with self.nodes[0].assert_debug_log(['addr message size = 1010']): addr_source.send_and_ping(msg) self.log.info( 'Check that addr message content is relayed and added to addrman') addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) msg.addrs = ADDRS with self.nodes[0].assert_debug_log([ 'Added 10 addresses from 127.0.0.1: 0 tried', 'received: addr (301 bytes) peer=0', 'sending addr (301 bytes) peer=1', ]): addr_source.send_and_ping(msg) self.nodes[0].setmocktime(int(time.time()) + 30 * 60) addr_receiver.sync_with_ping() if __name__ == '__main__': AddrTest().main() diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addrv2_relay.py similarity index 60% copy from test/functional/p2p_addr_relay.py copy to test/functional/p2p_addrv2_relay.py index 6da32cc3e..f1aa0d17c 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addrv2_relay.py @@ -1,71 +1,79 @@ #!/usr/bin/env python3 # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
""" -Test addr relay +Test addrv2 relay """ +import time + from test_framework.messages import ( CAddress, + msg_addrv2, NODE_NETWORK, - msg_addr, -) -from test_framework.mininode import ( - P2PInterface, ) +from test_framework.mininode import P2PInterface from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import ( - assert_equal, -) -import time +from test_framework.util import assert_equal ADDRS = [] for i in range(10): addr = CAddress() addr.time = int(time.time()) + i addr.nServices = NODE_NETWORK addr.ip = "123.123.123.{}".format(i % 256) addr.port = 8333 + i ADDRS.append(addr) class AddrReceiver(P2PInterface): - def on_addr(self, message): + addrv2_received_and_checked = False + + def __init__(self): + super().__init__(support_addrv2=True) + + def on_addrv2(self, message): for addr in message.addrs: assert_equal(addr.nServices, NODE_NETWORK) assert addr.ip.startswith('123.123.123.') assert (8333 <= addr.port < 8343) + self.addrv2_received_and_checked = True + + def wait_for_addrv2(self): + self.wait_until(lambda: "addrv2" in self.last_message) class AddrTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False + self.setup_clean_chain = True self.num_nodes = 1 def run_test(self): - self.log.info('Create connection that sends addr messages') + self.log.info('Create connection that sends addrv2 messages') addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) - msg = msg_addr() + msg = msg_addrv2() - self.log.info('Send too large addr message') + self.log.info('Send too-large addrv2 message') msg.addrs = ADDRS * 101 - with self.nodes[0].assert_debug_log(['message addr size() = 1010']): + with self.nodes[0].assert_debug_log(['addrv2 message size = 1010']): addr_source.send_and_ping(msg) self.log.info( - 'Check that addr message content is relayed and added to addrman') + 'Check that addrv2 message content is relayed and added to addrman') addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) msg.addrs = ADDRS with self.nodes[0].assert_debug_log([ 'Added 10 addresses from 127.0.0.1: 0 tried', - 'received: addr (301 bytes) peer=0', - 'sending addr (301 bytes) peer=1', + 'received: addrv2 (131 bytes) peer=0', + 'sending addrv2 (131 bytes) peer=1', ]): addr_source.send_and_ping(msg) self.nodes[0].setmocktime(int(time.time()) + 30 * 60) - addr_receiver.sync_with_ping() + addr_receiver.wait_for_addrv2() + + assert addr_receiver.addrv2_received_and_checked if __name__ == '__main__': AddrTest().main() diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py index b7a159478..75a1763e4 100755 --- a/test/functional/p2p_invalid_messages.py +++ b/test/functional/p2p_invalid_messages.py @@ -1,261 +1,375 @@ #!/usr/bin/env python3 # Copyright (c) 2015-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test node responses to invalid network messages.""" import asyncio import struct +import time from test_framework.messages import ( CBlockHeader, CInv, msg_getdata, msg_headers, msg_inv, msg_ping, MSG_TX, ser_string, ) from test_framework.mininode import ( NetworkThread, P2PDataStore, P2PInterface, ) from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import hex_str_to_bytes class msg_unrecognized: """Nonsensical message. 
Modeled after similar types in test_framework.messages.""" msgtype = b'badmsg' def __init__(self, *, str_data): self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data def serialize(self): return ser_string(self.str_data) def __repr__(self): return "{}(data={})".format(self.msgtype, self.str_data) +class SenderOfAddrV2(P2PInterface): + def wait_for_sendaddrv2(self): + self.wait_until(lambda: 'sendaddrv2' in self.last_message) + + class InvalidMessagesTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 self.setup_clean_chain = True def run_test(self): """ . Test msg header 0. Send a bunch of large (2MB) messages of an unrecognized type. Check to see that it isn't an effective DoS against the node. 1. Send an oversized (2MB+) message and check that we're disconnected. 2. Send a few messages with an incorrect data size in the header, ensure the messages are ignored. """ self.test_magic_bytes() self.test_checksum() self.test_size() self.test_msgtype() + self.test_addrv2_empty() + self.test_addrv2_no_addresses() + self.test_addrv2_too_long_address() + self.test_addrv2_unrecognized_network() self.test_large_inv() node = self.nodes[0] self.node = node node.add_p2p_connection(P2PDataStore()) conn2 = node.add_p2p_connection(P2PDataStore()) # 2MB, per MAX_PROTOCOL_MESSAGE_LENGTH msg_limit = 2 * 1024 * 1024 # Account for the 4-byte length prefix valid_data_limit = msg_limit - 5 # # 0. # # Send as large a message as is valid, ensure we aren't disconnected but # also can't exhaust resources. # msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit) assert len(msg_at_size.serialize()) == msg_limit self.log.info( "Sending a bunch of large, junk messages to test memory exhaustion. May take a bit...") # Run a bunch of times to test for memory exhaustion. for _ in range(80): node.p2p.send_message(msg_at_size) # Check that, even though the node is being hammered by nonsense from one # connection, it can still service other peers in a timely way. for _ in range(20): conn2.sync_with_ping(timeout=2) # Peer 1, despite serving up a bunch of nonsense, should still be # connected. self.log.info("Waiting for node to drop junk messages.") node.p2p.sync_with_ping(timeout=320) assert node.p2p.is_connected # # 1. # # Send an oversized message, ensure we're disconnected. # msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1)) assert len(msg_over_size.serialize()) == (msg_limit + 1) with node.assert_debug_log(["Oversized header detected"]): # An unknown message type (or *any* message type) over # MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect. node.p2p.send_message(msg_over_size) node.p2p.wait_for_disconnect(timeout=4) node.disconnect_p2ps() conn = node.add_p2p_connection(P2PDataStore()) conn.wait_for_verack() # # 2. # # Send messages with an incorrect data size in the header. # actual_size = 100 msg = msg_unrecognized(str_data="b" * actual_size) # TODO: handle larger-than cases. I haven't been able to pin down what # behavior to expect. for wrong_size in (2, 77, 78, 79): self.log.info( "Sending a message with incorrect size of {}".format(wrong_size)) # Unmodified message should submit okay. node.p2p.send_and_ping(msg) # A message lying about its data size results in a disconnect when the incorrect # data size is less than the actual size. # # TODO: why does behavior change at 78 bytes? 
# node.p2p.send_raw_message( self._tweak_msg_data_size( msg, wrong_size)) # For some reason unknown to me, we sometimes have to push additional data to the # peer in order for it to realize a disconnect. try: node.p2p.send_message(msg_ping(nonce=123123)) except IOError: pass node.p2p.wait_for_disconnect(timeout=10) node.disconnect_p2ps() node.add_p2p_connection(P2PDataStore()) # Node is still up. conn = node.add_p2p_connection(P2PDataStore()) def test_magic_bytes(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) async def swap_magic_bytes(): # Need to ignore all incoming messages from now, since they come # with "invalid" magic bytes conn._on_data = lambda: None conn.magic_bytes = b'\x00\x11\x22\x32' # Call .result() to block until the atomic swap is complete, otherwise # we might run into races later on asyncio.run_coroutine_threadsafe( swap_magic_bytes(), NetworkThread.network_event_loop).result() with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']): conn.send_message(msg_ping(nonce=0xff)) conn.wait_for_disconnect(timeout=1) self.nodes[0].disconnect_p2ps() def test_checksum(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) with self.nodes[0].assert_debug_log(['CHECKSUM ERROR (badmsg, 2 bytes), expected 78df0a04 was ffffffff']): msg = conn.build_message(msg_unrecognized(str_data="d")) cut_len = ( # magic 4 + # msgtype 12 + # len 4 ) # modify checksum msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:] self.nodes[0].p2p.send_raw_message(msg) conn.wait_for_disconnect() self.nodes[0].disconnect_p2ps() def test_size(self): conn = self.nodes[0].add_p2p_connection(P2PDataStore()) with self.nodes[0].assert_debug_log(['']): msg = conn.build_message(msg_unrecognized(str_data="d")) cut_len = ( # magic 4 + # command 12 ) # modify len to MAX_SIZE + 1 msg = msg[:cut_len] + \ struct.pack(" 512', + ], + hex_str_to_bytes( + # number of entries + '01' + + # time, Fri Jan 9 02:54:25 UTC 2009 + '61bc6649' + + # service flags, COMPACTSIZE(NODE_NONE) + '00' + + # network type (IPv4) + '01' + + # address length (COMPACTSIZE(513)) + 'fd0102' + + # address + 'ab' * 513 + + # port + '208d')) + + def test_addrv2_unrecognized_network(self): + now_hex = struct.pack(' 20): oversized-inv: message inv size() = 50001']): + with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=8 (0 -> 20): oversized-inv: message inv size() = 50001']): msg = msg_inv([CInv(MSG_TX, 1)] * 50001) conn.send_and_ping(msg) - with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (20 -> 40): too-many-inv: message getdata size() = 50001']): + with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=8 (20 -> 40): too-many-inv: message getdata size() = 50001']): msg = msg_getdata([CInv(MSG_TX, 1)] * 50001) conn.send_and_ping(msg) - with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=4 (40 -> 60): too-many-headers: headers message size = 2001']): + with self.nodes[0].assert_debug_log(['Misbehaving', 'peer=8 (40 -> 60): too-many-headers: headers message size = 2001']): msg = msg_headers([CBlockHeader()] * 2001) conn.send_and_ping(msg) self.nodes[0].disconnect_p2ps() def _tweak_msg_data_size(self, message, wrong_size): """ Return a raw message based on another message but with an incorrect data size in the message header. 
""" raw_msg = self.node.p2p.build_message(message) bad_size_bytes = struct.pack("> 2 FILTER_TYPE_BASIC = 0 # Serialization/deserialization tools def sha256(s): return hashlib.new('sha256', s).digest() def hash256(s): return sha256(sha256(s)) def ser_compact_size(size): r = b"" if size < 253: r = struct.pack("B", size) elif size < 0x10000: r = struct.pack(">= 32 return rs def uint256_from_str(s): r = 0 t = struct.unpack("> 24) & 0xFF v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) return v -def deser_vector(f, c): +# deser_function_name: Allow for an alternate deserialization function on the +# entries in the vector. +def deser_vector(f, c, deser_function_name=None): nit = deser_compact_size(f) r = [] for i in range(nit): t = c() - t.deserialize(f) + if deser_function_name: + getattr(t, deser_function_name)(f) + else: + t.deserialize(f) r.append(t) return r # ser_function_name: Allow for an alternate serialization function on the # entries in the vector. def ser_vector(v, ser_function_name=None): r = ser_compact_size(len(v)) for i in v: if ser_function_name: r += getattr(i, ser_function_name)() else: r += i.serialize() return r def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_uint256(f) r.append(t) return r def ser_uint256_vector(v): r = ser_compact_size(len(v)) for i in v: r += ser_uint256(i) return r def deser_string_vector(f): nit = deser_compact_size(f) r = [] for i in range(nit): t = deser_string(f) r.append(t) return r def ser_string_vector(v): r = ser_compact_size(len(v)) for sv in v: r += ser_string(sv) return r def FromHex(obj, hex_string): """Deserialize from a hex string representation (eg from RPC)""" obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) return obj def ToHex(obj): """Convert a binary-serializable object to hex (eg for submission via RPC)""" return obj.serialize().hex() # Objects that map to bitcoind objects, which can be serialized/deserialized class CAddress: - __slots__ = ("ip", "nServices", "pchReserved", "port", "time") + __slots__ = ("net", "ip", "nServices", "port", "time") + + # see https://github.com/bitcoin/bips/blob/master/bip-0155.mediawiki + NET_IPV4 = 1 + + ADDRV2_NET_NAME = { + NET_IPV4: "IPv4" + } + + ADDRV2_ADDRESS_LENGTH = { + NET_IPV4: 4 + } def __init__(self): self.time = 0 self.nServices = 1 - self.pchReserved = b"\x00" * 10 + b"\xff" * 2 + self.net = self.NET_IPV4 self.ip = "0.0.0.0" self.port = 0 def deserialize(self, f, *, with_time=True): + """Deserialize from addrv1 format (pre-BIP155)""" if with_time: # VERSION messages serialize CAddress objects without time - self.time = struct.unpack("H", f.read(2))[0] def serialize(self, *, with_time=True): + """Serialize in addrv1 format (pre-BIP155)""" + assert self.net == self.NET_IPV4 r = b"" if with_time: # VERSION messages serialize CAddress objects without time - r += struct.pack("H", self.port) + return r + + def deserialize_v2(self, f): + """Deserialize from addrv2 format (BIP155)""" + self.time = struct.unpack("H", f.read(2))[0] + + def serialize_v2(self): + """Serialize in addrv2 format (BIP155)""" + assert self.net == self.NET_IPV4 + r = b"" + r += struct.pack("H", self.port) return r def __repr__(self): - return "CAddress(nServices={} ip={} port={})".format( - self.nServices, self.ip, self.port) + return ("CAddress(nServices=%i net=%s addr=%s port=%i)" + % (self.nServices, self.ADDRV2_NET_NAME[self.net], self.ip, self.port)) class CInv: __slots__ = ("hash", "type") typemap = { 0: "Error", MSG_TX: "TX", MSG_BLOCK: "Block", MSG_FILTERED_BLOCK: 
"filtered Block", MSG_CMPCT_BLOCK: "CompactBlock" } def __init__(self, t=0, h=0): self.type = t self.hash = h def deserialize(self, f): self.type = struct.unpack(" 21000000 * COIN: return False return True def __repr__(self): return "CTransaction(nVersion={} vin={} vout={} nLockTime={})".format( self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime) class CBlockHeader: __slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce", "nTime", "nVersion", "sha256") def __init__(self, header=None): if header is None: self.set_null() else: self.nVersion = header.nVersion self.hashPrevBlock = header.hashPrevBlock self.hashMerkleRoot = header.hashMerkleRoot self.nTime = header.nTime self.nBits = header.nBits self.nNonce = header.nNonce self.sha256 = header.sha256 self.hash = header.hash self.calc_sha256() def set_null(self): self.nVersion = 1 self.hashPrevBlock = 0 self.hashMerkleRoot = 0 self.nTime = 0 self.nBits = 0 self.nNonce = 0 self.sha256 = None self.hash = None def deserialize(self, f): self.nVersion = struct.unpack(" 1: newhashes = [] for i in range(0, len(hashes), 2): i2 = min(i + 1, len(hashes) - 1) newhashes.append(hash256(hashes[i] + hashes[i2])) hashes = newhashes return uint256_from_str(hashes[0]) def calc_merkle_root(self): hashes = [] for tx in self.vtx: tx.calc_sha256() hashes.append(ser_uint256(tx.sha256)) return self.get_merkle_root(hashes) def is_valid(self): self.calc_sha256() target = uint256_from_compact(self.nBits) if self.sha256 > target: return False for tx in self.vtx: if not tx.is_valid(): return False if self.calc_merkle_root() != self.hashMerkleRoot: return False return True def solve(self): self.rehash() target = uint256_from_compact(self.nBits) while self.sha256 > target: self.nNonce += 1 self.rehash() def __repr__(self): return "CBlock(nVersion={} hashPrevBlock={:064x} hashMerkleRoot={:064x} nTime={} nBits={:08x} nNonce={:08x} vtx={})".format( self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.nTime, self.nBits, self.nNonce, repr(self.vtx)) class PrefilledTransaction: __slots__ = ("index", "tx") def __init__(self, index=0, tx=None): self.index = index self.tx = tx def deserialize(self, f): self.index = deser_compact_size(f) self.tx = CTransaction() self.tx.deserialize(f) def serialize(self): r = b"" r += ser_compact_size(self.index) r += self.tx.serialize() return r def __repr__(self): return "PrefilledTransaction(index={}, tx={})".format( self.index, repr(self.tx)) # This is what we send on the wire, in a cmpctblock message. 
class P2PHeaderAndShortIDs: __slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length", "shortids", "shortids_length") def __init__(self): self.header = CBlockHeader() self.nonce = 0 self.shortids_length = 0 self.shortids = [] self.prefilled_txn_length = 0 self.prefilled_txn = [] def deserialize(self, f): self.header.deserialize(f) self.nonce = struct.unpack(" class msg_headers: __slots__ = ("headers",) msgtype = b"headers" def __init__(self, headers=None): self.headers = headers if headers is not None else [] def deserialize(self, f): # comment in bitcoind indicates these should be deserialized as blocks blocks = deser_vector(f, CBlock) for x in blocks: self.headers.append(CBlockHeader(x)) def serialize(self): blocks = [CBlock(x) for x in self.headers] return ser_vector(blocks) def __repr__(self): return "msg_headers(headers={})".format(repr(self.headers)) class msg_merkleblock: __slots__ = ("merkleblock",) msgtype = b"merkleblock" def __init__(self, merkleblock=None): if merkleblock is None: self.merkleblock = CMerkleBlock() else: self.merkleblock = merkleblock def deserialize(self, f): self.merkleblock.deserialize(f) def serialize(self): return self.merkleblock.serialize() def __repr__(self): return "msg_merkleblock(merkleblock={})".format(repr(self.merkleblock)) class msg_filterload: __slots__ = ("data", "nHashFuncs", "nTweak", "nFlags") msgtype = b"filterload" def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0): self.data = data self.nHashFuncs = nHashFuncs self.nTweak = nTweak self.nFlags = nFlags def deserialize(self, f): self.data = deser_string(f) self.nHashFuncs = struct.unpack(" 0: self.recvbuf += t while True: msg = self._on_data() if msg is None: break self.on_message(msg) def _on_data(self): """Try to read P2P messages from the recv buffer. This method reads data from the buffer in a loop. It deserializes, parses and verifies the P2P header, then passes the P2P payload to the on_message callback for processing.""" try: with mininode_lock: if len(self.recvbuf) < 4: return None if self.recvbuf[:4] != self.magic_bytes: raise ValueError( "magic bytes mismatch: {} != {}".format( repr( self.magic_bytes), repr( self.recvbuf))) if len(self.recvbuf) < 4 + 12 + 4 + 4: return None msgtype = self.recvbuf[4:4 + 12].split(b"\x00", 1)[0] msglen = struct.unpack( " 500: log_message += "... (msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): """A high-level P2P interface class for communicating with a Bitcoin Cash node. This class provides high-level callbacks for processing P2P message payloads, as well as convenience methods for interacting with the node over P2P. 
Individual testcases should subclass this and override the on_* methods if they want to alter message handling behaviour.""" - def __init__(self): + def __init__(self, support_addrv2=False): super().__init__() # Track number of messages of each type received and the most recent # message of each type self.message_count = defaultdict(int) self.last_message = {} # A count of the number of ping messages we've sent to the node self.ping_counter = 1 # The network services received from the peer self.nServices = 0 + self.support_addrv2 = support_addrv2 + def peer_connect(self, *args, services=NODE_NETWORK, send_version=True, **kwargs): create_conn = super().peer_connect(*args, **kwargs) if send_version: # Send a version msg vt = msg_version() vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 # Will be sent soon after connection_made self.on_connection_send_msg = vt return create_conn # Message receiving methods def on_message(self, message): """Receive message and dispatch message to appropriate callback. We keep a count of how many of each message type has been received and the most recent message of each type.""" with mininode_lock: try: msgtype = message.msgtype.decode('ascii') self.message_count[msgtype] += 1 self.last_message[msgtype] = message getattr(self, 'on_' + msgtype)(message) except Exception: print("ERROR delivering {} ({})".format( repr(message), sys.exc_info()[0])) raise # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour. def on_open(self): pass def on_close(self): pass def on_addr(self, message): pass + def on_addrv2(self, message): pass + def on_avapoll(self, message): pass def on_avaresponse(self, message): pass def on_avahello(self, message): pass def on_block(self, message): pass def on_blocktxn(self, message): pass def on_cfcheckpt(self, message): pass def on_cfheaders(self, message): pass def on_cfilter(self, message): pass def on_cmpctblock(self, message): pass def on_feefilter(self, message): pass def on_filteradd(self, message): pass def on_filterclear(self, message): pass def on_filterload(self, message): pass def on_getaddr(self, message): pass def on_getblocks(self, message): pass def on_getblocktxn(self, message): pass def on_getdata(self, message): pass def on_getheaders(self, message): pass def on_headers(self, message): pass def on_mempool(self, message): pass def on_merkleblock(self, message): pass def on_notfound(self, message): pass def on_pong(self, message): pass + def on_sendaddrv2(self, message): pass + def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass def on_inv(self, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): self.send_message(want) def on_ping(self, message): self.send_message(msg_pong(message.nonce)) def on_verack(self, message): pass def on_version(self, message): assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. 
Test framework only supports versions greater than {}".format( message.nVersion, MIN_VERSION_SUPPORTED) self.send_message(msg_verack()) + if self.support_addrv2: + self.send_message(msg_sendaddrv2()) self.nServices = message.nServices # Connection helper methods def wait_until(self, test_function, timeout=60): wait_until(test_function, timeout=timeout, lock=mininode_lock, timeout_factor=self.timeout_factor) def wait_for_disconnect(self, timeout=60): def test_function(): return not self.is_connected self.wait_until(test_function, timeout=timeout) # Message receiving helper methods def wait_for_tx(self, txid, timeout=60): def test_function(): assert self.is_connected if not self.last_message.get('tx'): return False return self.last_message['tx'].tx.rehash() == txid self.wait_until(test_function, timeout=timeout) def wait_for_block(self, blockhash, timeout=60): def test_function(): assert self.is_connected return self.last_message.get( "block") and self.last_message["block"].block.rehash() == blockhash self.wait_until(test_function, timeout=timeout) def wait_for_header(self, blockhash, timeout=60): def test_function(): assert self.is_connected last_headers = self.last_message.get('headers') if not last_headers: return False return last_headers.headers[0].rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_merkleblock(self, blockhash, timeout=60): def test_function(): assert self.is_connected last_filtered_block = self.last_message.get('merkleblock') if not last_filtered_block: return False return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_getdata(self, hash_list, timeout=60): """Waits for a getdata message. The object hashes in the inventory vector must match the provided hash_list.""" def test_function(): assert self.is_connected last_data = self.last_message.get("getdata") if not last_data: return False return [x.hash for x in last_data.inv] == hash_list self.wait_until(test_function, timeout=timeout) def wait_for_getheaders(self, timeout=60): """Waits for a getheaders message. Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"] value must be explicitly cleared before calling this method, or this will return immediately with success. 
TODO: change this method to take a hash value and only return true if the correct block header has been requested.""" def test_function(): assert self.is_connected return self.last_message.get("getheaders") self.wait_until(test_function, timeout=timeout) def wait_for_inv(self, expected_inv, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError( "wait_for_inv() will only verify the first inv object") def test_function(): assert self.is_connected return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash self.wait_until(test_function, timeout=timeout) def wait_for_verack(self, timeout=60): def test_function(): return self.message_count["verack"] self.wait_until(test_function, timeout=timeout) # Message sending helper functions def send_and_ping(self, message, timeout=60): self.send_message(message) self.sync_with_ping(timeout=timeout) # Sync up with the node def sync_with_ping(self, timeout=60): self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): assert self.is_connected return self.last_message.get( "pong") and self.last_message["pong"].nonce == self.ping_counter self.wait_until(test_function, timeout=timeout) self.ping_counter += 1 # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, # P2PConnection acquires this lock whenever delivering a message to a P2PInterface. # This lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. mininode_lock = threading.RLock() class NetworkThread(threading.Thread): network_event_loop = None def __init__(self): super().__init__(name="NetworkThread") # There is only one event loop and no more than one thread must be # created assert not self.network_event_loop NetworkThread.network_event_loop = asyncio.new_event_loop() def run(self): """Start the network thread.""" self.network_event_loop.run_forever() def close(self, timeout=10): """Close the connections and network event loop.""" self.network_event_loop.call_soon_threadsafe( self.network_event_loop.stop) wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout) self.network_event_loop.close() self.join(timeout) # Safe to remove event loop. NetworkThread.network_event_loop = None class P2PDataStore(P2PInterface): """A P2P data store class. Keeps a block and transaction store and responds correctly to getdata and getheaders requests.""" def __init__(self): super().__init__() # store of blocks. key is block hash, value is a CBlock object self.block_store = {} self.last_block_hash = '' # store of txs. 
key is txid, value is a CTransaction object self.tx_store = {} self.getdata_requests = [] def on_getdata(self, message): """Check for the tx/block in our stores and if found, reply with an inv message.""" for inv in message.inv: self.getdata_requests.append(inv.hash) if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys(): self.send_message(msg_tx(self.tx_store[inv.hash])) elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys(): self.send_message(msg_block(self.block_store[inv.hash])) else: logger.debug( 'getdata message type {} received.'.format(hex(inv.type))) def on_getheaders(self, message): """Search back through our block store for the locator, and reply with a headers message if found.""" locator, hash_stop = message.locator, message.hashstop # Assume that the most recent block added is the tip if not self.block_store: return headers_list = [self.block_store[self.last_block_hash]] maxheaders = 2000 while headers_list[-1].sha256 not in locator.vHave: # Walk back through the block store, adding headers to headers_list # as we go. prev_block_hash = headers_list[-1].hashPrevBlock if prev_block_hash in self.block_store: prev_block_header = CBlockHeader( self.block_store[prev_block_hash]) headers_list.append(prev_block_header) if prev_block_header.sha256 == hash_stop: # if this is the hashstop header, stop here break else: logger.debug('block hash {} not found in block store'.format( hex(prev_block_hash))) break # Truncate the list if there are too many headers headers_list = headers_list[:-maxheaders - 1:-1] response = msg_headers(headers_list) if response is not None: self.send_message(response) def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60): """Send blocks to test node and test whether the tip advances. - add all blocks to our block_store - send a headers message for the final block - the on_getheaders handler will ensure that any getheaders are responded to - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will ensure that any getdata messages are responded to. Otherwise send the full block unsolicited. - if success is True: assert that the node's tip advances to the most recent block - if success is False: assert that the node's tip doesn't advance - if reject_reason is set: assert that the correct reject message is logged""" with mininode_lock: for block in blocks: self.block_store[block.sha256] = block self.last_block_hash = block.sha256 def test(): if force_send: for b in blocks: self.send_message(msg_block(block=b)) else: self.send_message( msg_headers([CBlockHeader(block) for block in blocks])) self.wait_until( lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout) if expect_disconnect: self.wait_for_disconnect(timeout=timeout) else: self.sync_with_ping(timeout=timeout) if success: self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout) else: assert node.getbestblockhash() != blocks[-1].hash if reject_reason: with node.assert_debug_log(expected_msgs=[reject_reason]): test() else: test() def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): """Send txs to test node and test whether they're accepted to the mempool. 
- add all txs to our tx_store - send tx messages for all txs - if success is True/False: assert that the txs are/are not accepted to the mempool - if expect_disconnect is True: Skip the sync with ping - if reject_reason is set: assert that the correct reject message is logged.""" with mininode_lock: for tx in txs: self.tx_store[tx.sha256] = tx def test(): for tx in txs: self.send_message(msg_tx(tx)) if expect_disconnect: self.wait_for_disconnect() else: self.sync_with_ping() raw_mempool = node.getrawmempool() if success: # Check that all txs are now in the mempool for tx in txs: assert tx.hash in raw_mempool, "{} not found in mempool".format( tx.hash) else: # Check that none of the txs are now in the mempool for tx in txs: assert tx.hash not in raw_mempool, "{} tx found in mempool".format( tx.hash) if reject_reason: with node.assert_debug_log(expected_msgs=[reject_reason]): test() else: test() class P2PTxInvStore(P2PInterface): """A P2PInterface which stores a count of how many times each txid has been announced.""" def __init__(self): super().__init__() self.tx_invs_received = defaultdict(int) def on_inv(self, message): # Send getdata in response. super().on_inv(message) # Store how many times invs have been received for each tx. for i in message.inv: if i.type == MSG_TX: # save txid self.tx_invs_received[i.hash] += 1 def get_invs(self): with mininode_lock: return list(self.tx_invs_received.keys()) def wait_for_broadcast(self, txns, timeout=60): """Waits for the txns (list of txids) to complete initial broadcast. The mempool should mark unbroadcast=False for these transactions. """ # Wait until invs have been received (and getdatas sent) for each txid. self.wait_until(lambda: set(self.get_invs()) == set( [int(tx, 16) for tx in txns]), timeout) # Flush messages and wait for the getdatas to be processed self.sync_with_ping()
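As a closing cross-check on the byte counts asserted in p2p_addr_relay.py and p2p_addrv2_relay.py, the same 10-entry ADDRS list serializes to 301 bytes as a legacy addr message but only 131 bytes as addrv2, because BIP155 encodes the service flags as a CompactSize and an IPv4 address in 4 bytes instead of the padded 16-byte field. A back-of-the-envelope sketch (assuming NODE_NETWORK service flags, as in the tests):

NUM_ADDRS = 10

# addr (v1): each entry is time(4) + services(8) + 16-byte IP field + port(2).
v1_entry = 4 + 8 + 16 + 2                 # 30 bytes per address
v1_payload = 1 + NUM_ADDRS * v1_entry     # 1-byte CompactSize count -> 301

# addrv2 (BIP155): time(4) + CompactSize(services)(1 for NODE_NETWORK)
# + network id(1) + CompactSize(address length)(1) + IPv4 address(4) + port(2).
v2_entry = 4 + 1 + 1 + 1 + 4 + 2          # 13 bytes per address
v2_payload = 1 + NUM_ADDRS * v2_entry     # -> 131

assert v1_payload == 301
assert v2_payload == 131

These totals are exactly the "addr (301 bytes)" and "addrv2 (131 bytes)" strings the functional tests expect in the debug log.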